mirror of
https://github.com/ansible-collections/community.general.git
synced 2026-05-06 21:32:49 +00:00
Merge remote-tracking branch 'upstream/devel' into ec2_util_boto3
This commit is contained in:
@@ -19,5 +19,5 @@
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
__version__ = '2.0.0'
|
||||
__version__ = '2.1.0'
|
||||
__author__ = 'Ansible, Inc.'
|
||||
|
||||
@@ -32,7 +32,7 @@ import subprocess
|
||||
from ansible import __version__
|
||||
from ansible import constants as C
|
||||
from ansible.errors import AnsibleError, AnsibleOptionsError
|
||||
from ansible.utils.unicode import to_bytes
|
||||
from ansible.utils.unicode import to_bytes, to_unicode
|
||||
|
||||
try:
|
||||
from __main__ import display
|
||||
@@ -66,7 +66,7 @@ class CLI(object):
|
||||
LESS_OPTS = 'FRSX' # -F (quit-if-one-screen) -R (allow raw ansi control chars)
|
||||
# -S (chop long lines) -X (disable termcap init and de-init)
|
||||
|
||||
def __init__(self, args):
|
||||
def __init__(self, args, callback=None):
|
||||
"""
|
||||
Base init method for all command line programs
|
||||
"""
|
||||
@@ -75,6 +75,7 @@ class CLI(object):
|
||||
self.options = None
|
||||
self.parser = None
|
||||
self.action = None
|
||||
self.callback = callback
|
||||
|
||||
def set_action(self):
|
||||
"""
|
||||
@@ -104,9 +105,9 @@ class CLI(object):
|
||||
|
||||
if self.options.verbosity > 0:
|
||||
if C.CONFIG_FILE:
|
||||
display.display("Using %s as config file" % C.CONFIG_FILE)
|
||||
display.display(u"Using %s as config file" % to_unicode(C.CONFIG_FILE))
|
||||
else:
|
||||
display.display("No config file found; using defaults")
|
||||
display.display(u"No config file found; using defaults")
|
||||
|
||||
@staticmethod
|
||||
def ask_vault_passwords(ask_new_vault_pass=False, rekey=False):
|
||||
@@ -191,12 +192,9 @@ class CLI(object):
|
||||
|
||||
if runas_opts:
|
||||
# Check for privilege escalation conflicts
|
||||
if (op.su or op.su_user or op.ask_su_pass) and \
|
||||
(op.sudo or op.sudo_user or op.ask_sudo_pass) or \
|
||||
(op.su or op.su_user or op.ask_su_pass) and \
|
||||
(op.become or op.become_user or op.become_ask_pass) or \
|
||||
(op.sudo or op.sudo_user or op.ask_sudo_pass) and \
|
||||
(op.become or op.become_user or op.become_ask_pass):
|
||||
if (op.su or op.su_user) and (op.sudo or op.sudo_user) or \
|
||||
(op.su or op.su_user) and (op.become or op.become_user) or \
|
||||
(op.sudo or op.sudo_user) and (op.become or op.become_user):
|
||||
|
||||
self.parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') "
|
||||
"and su arguments ('-su', '--su-user', and '--ask-su-pass') "
|
||||
@@ -213,7 +211,7 @@ class CLI(object):
|
||||
|
||||
@staticmethod
|
||||
def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, runtask_opts=False, vault_opts=False, module_opts=False,
|
||||
async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, inventory_opts=False, epilog=None, fork_opts=False):
|
||||
async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, inventory_opts=False, epilog=None, fork_opts=False, runas_prompt_opts=False):
|
||||
''' create an options parser for most ansible scripts '''
|
||||
|
||||
# TODO: implement epilog parsing
|
||||
@@ -246,14 +244,15 @@ class CLI(object):
|
||||
help="specify number of parallel processes to use (default=%s)" % C.DEFAULT_FORKS)
|
||||
|
||||
if vault_opts:
|
||||
parser.add_option('--ask-vault-pass', default=False, dest='ask_vault_pass', action='store_true',
|
||||
parser.add_option('--ask-vault-pass', default=C.DEFAULT_ASK_VAULT_PASS, dest='ask_vault_pass', action='store_true',
|
||||
help='ask for vault password')
|
||||
parser.add_option('--vault-password-file', default=C.DEFAULT_VAULT_PASSWORD_FILE, dest='vault_password_file',
|
||||
help="vault password file", action="callback", callback=CLI.expand_tilde, type=str)
|
||||
parser.add_option('--new-vault-password-file', dest='new_vault_password_file',
|
||||
help="new vault password file for rekey", action="callback", callback=CLI.expand_tilde, type=str)
|
||||
parser.add_option('--output', default=None, dest='output_file',
|
||||
help='output file name for encrypt or decrypt; use - for stdout')
|
||||
help='output file name for encrypt or decrypt; use - for stdout',
|
||||
action="callback", callback=CLI.expand_tilde, type=str)
|
||||
|
||||
if subset_opts:
|
||||
parser.add_option('-t', '--tags', dest='tags', default='all',
|
||||
@@ -269,10 +268,6 @@ class CLI(object):
|
||||
|
||||
if runas_opts:
|
||||
# priv user defaults to root later on to enable detecting when this option was given here
|
||||
parser.add_option('-K', '--ask-sudo-pass', default=C.DEFAULT_ASK_SUDO_PASS, dest='ask_sudo_pass', action='store_true',
|
||||
help='ask for sudo password (deprecated, use become)')
|
||||
parser.add_option('--ask-su-pass', default=C.DEFAULT_ASK_SU_PASS, dest='ask_su_pass', action='store_true',
|
||||
help='ask for su password (deprecated, use become)')
|
||||
parser.add_option("-s", "--sudo", default=C.DEFAULT_SUDO, action="store_true", dest='sudo',
|
||||
help="run operations with sudo (nopasswd) (deprecated, use become)")
|
||||
parser.add_option('-U', '--sudo-user', dest='sudo_user', default=None,
|
||||
@@ -289,6 +284,12 @@ class CLI(object):
|
||||
help="privilege escalation method to use (default=%s), valid choices: [ %s ]" % (C.DEFAULT_BECOME_METHOD, ' | '.join(C.BECOME_METHODS)))
|
||||
parser.add_option('--become-user', default=None, dest='become_user', type='string',
|
||||
help='run operations as this user (default=%s)' % C.DEFAULT_BECOME_USER)
|
||||
|
||||
if runas_opts or runas_prompt_opts:
|
||||
parser.add_option('-K', '--ask-sudo-pass', default=C.DEFAULT_ASK_SUDO_PASS, dest='ask_sudo_pass', action='store_true',
|
||||
help='ask for sudo password (deprecated, use become)')
|
||||
parser.add_option('--ask-su-pass', default=C.DEFAULT_ASK_SU_PASS, dest='ask_su_pass', action='store_true',
|
||||
help='ask for su password (deprecated, use become)')
|
||||
parser.add_option('--ask-become-pass', default=False, dest='become_ask_pass', action='store_true',
|
||||
help='ask for privilege escalation password')
|
||||
|
||||
|
||||
@@ -70,7 +70,7 @@ class AdHocCLI(CLI):
|
||||
help="module name to execute (default=%s)" % C.DEFAULT_MODULE_NAME,
|
||||
default=C.DEFAULT_MODULE_NAME)
|
||||
|
||||
self.options, self.args = self.parser.parse_args()
|
||||
self.options, self.args = self.parser.parse_args(self.args[1:])
|
||||
|
||||
if len(self.args) != 1:
|
||||
raise AnsibleOptionsError("Missing target hosts")
|
||||
@@ -124,17 +124,13 @@ class AdHocCLI(CLI):
|
||||
inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=self.options.inventory)
|
||||
variable_manager.set_inventory(inventory)
|
||||
|
||||
hosts = inventory.list_hosts(pattern)
|
||||
no_hosts = False
|
||||
if len(hosts) == 0:
|
||||
display.warning("provided hosts list is empty, only localhost is available")
|
||||
no_hosts = True
|
||||
|
||||
if self.options.subset:
|
||||
inventory.subset(self.options.subset)
|
||||
if len(inventory.list_hosts(pattern)) == 0 and not no_hosts:
|
||||
# Invalid limit
|
||||
raise AnsibleError("Specified --limit does not match any hosts")
|
||||
|
||||
hosts = inventory.list_hosts(pattern)
|
||||
if len(hosts) == 0:
|
||||
raise AnsibleError("Specified hosts options do not match any hosts")
|
||||
|
||||
if self.options.listhosts:
|
||||
display.display(' hosts (%d):' % len(hosts))
|
||||
@@ -158,14 +154,18 @@ class AdHocCLI(CLI):
|
||||
play_ds = self._play_ds(pattern, self.options.seconds, self.options.poll_interval)
|
||||
play = Play().load(play_ds, variable_manager=variable_manager, loader=loader)
|
||||
|
||||
if self.options.one_line:
|
||||
if self.callback:
|
||||
cb = self.callback
|
||||
elif self.options.one_line:
|
||||
cb = 'oneline'
|
||||
else:
|
||||
cb = 'minimal'
|
||||
|
||||
run_tree=False
|
||||
if self.options.tree:
|
||||
C.DEFAULT_CALLBACK_WHITELIST.append('tree')
|
||||
C.TREE_DIR = self.options.tree
|
||||
run_tree=True
|
||||
|
||||
# now create a task queue manager to execute the play
|
||||
self._tqm = None
|
||||
@@ -177,6 +177,8 @@ class AdHocCLI(CLI):
|
||||
options=self.options,
|
||||
passwords=passwords,
|
||||
stdout_callback=cb,
|
||||
run_additional_callbacks=C.DEFAULT_LOAD_CALLBACK_PLUGINS,
|
||||
run_tree=run_tree,
|
||||
)
|
||||
result = self._tqm.run(play)
|
||||
finally:
|
||||
|
||||
@@ -62,7 +62,7 @@ class DocCLI(CLI):
|
||||
self.parser.add_option("-s", "--snippet", action="store_true", default=False, dest='show_snippet',
|
||||
help='Show playbook snippet for specified module(s)')
|
||||
|
||||
self.options, self.args = self.parser.parse_args()
|
||||
self.options, self.args = self.parser.parse_args(self.args[1:])
|
||||
display.verbosity = self.options.verbosity
|
||||
|
||||
def run(self):
|
||||
@@ -90,7 +90,8 @@ class DocCLI(CLI):
|
||||
for module in self.args:
|
||||
|
||||
try:
|
||||
filename = module_loader.find_plugin(module)
|
||||
# if the module lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs
|
||||
filename = module_loader.find_plugin(module, mod_type='.py')
|
||||
if filename is None:
|
||||
display.warning("module %s not found in %s\n" % (module, DocCLI.print_paths(module_loader)))
|
||||
continue
|
||||
@@ -167,7 +168,8 @@ class DocCLI(CLI):
|
||||
if module in module_docs.BLACKLIST_MODULES:
|
||||
continue
|
||||
|
||||
filename = module_loader.find_plugin(module)
|
||||
# if the module lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs
|
||||
filename = module_loader.find_plugin(module, mod_type='.py')
|
||||
|
||||
if filename is None:
|
||||
continue
|
||||
|
||||
@@ -22,10 +22,10 @@
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import os
|
||||
import os.path
|
||||
import sys
|
||||
import yaml
|
||||
import time
|
||||
|
||||
from collections import defaultdict
|
||||
from jinja2 import Environment
|
||||
@@ -36,6 +36,8 @@ from ansible.errors import AnsibleError, AnsibleOptionsError
|
||||
from ansible.galaxy import Galaxy
|
||||
from ansible.galaxy.api import GalaxyAPI
|
||||
from ansible.galaxy.role import GalaxyRole
|
||||
from ansible.galaxy.login import GalaxyLogin
|
||||
from ansible.galaxy.token import GalaxyToken
|
||||
from ansible.playbook.role.requirement import RoleRequirement
|
||||
|
||||
try:
|
||||
@@ -44,14 +46,12 @@ except ImportError:
|
||||
from ansible.utils.display import Display
|
||||
display = Display()
|
||||
|
||||
|
||||
class GalaxyCLI(CLI):
|
||||
|
||||
VALID_ACTIONS = ("init", "info", "install", "list", "remove", "search")
|
||||
SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url" )
|
||||
|
||||
VALID_ACTIONS = ("delete", "import", "info", "init", "install", "list", "login", "remove", "search", "setup")
|
||||
|
||||
def __init__(self, args):
|
||||
|
||||
self.api = None
|
||||
self.galaxy = None
|
||||
super(GalaxyCLI, self).__init__(args)
|
||||
@@ -67,7 +67,17 @@ class GalaxyCLI(CLI):
|
||||
self.set_action()
|
||||
|
||||
# options specific to actions
|
||||
if self.action == "info":
|
||||
if self.action == "delete":
|
||||
self.parser.set_usage("usage: %prog delete [options] github_user github_repo")
|
||||
elif self.action == "import":
|
||||
self.parser.set_usage("usage: %prog import [options] github_user github_repo")
|
||||
self.parser.add_option('--no-wait', dest='wait', action='store_false', default=True,
|
||||
help='Don\'t wait for import results.')
|
||||
self.parser.add_option('--branch', dest='reference',
|
||||
help='The name of a branch to import. Defaults to the repository\'s default branch (usually master)')
|
||||
self.parser.add_option('--status', dest='check_status', action='store_true', default=False,
|
||||
help='Check the status of the most recent import request for given github_user/github_repo.')
|
||||
elif self.action == "info":
|
||||
self.parser.set_usage("usage: %prog info [options] role_name[,version]")
|
||||
elif self.action == "init":
|
||||
self.parser.set_usage("usage: %prog init [options] role_name")
|
||||
@@ -88,31 +98,42 @@ class GalaxyCLI(CLI):
|
||||
self.parser.set_usage("usage: %prog remove role1 role2 ...")
|
||||
elif self.action == "list":
|
||||
self.parser.set_usage("usage: %prog list [role_name]")
|
||||
elif self.action == "login":
|
||||
self.parser.set_usage("usage: %prog login [options]")
|
||||
self.parser.add_option('--github-token', dest='token', default=None,
|
||||
help='Identify with github token rather than username and password.')
|
||||
elif self.action == "search":
|
||||
self.parser.add_option('--platforms', dest='platforms',
|
||||
help='list of OS platforms to filter by')
|
||||
self.parser.add_option('--galaxy-tags', dest='tags',
|
||||
help='list of galaxy tags to filter by')
|
||||
self.parser.set_usage("usage: %prog search [<search_term>] [--galaxy-tags <galaxy_tag1,galaxy_tag2>] [--platforms platform]")
|
||||
self.parser.add_option('--author', dest='author',
|
||||
help='GitHub username')
|
||||
self.parser.set_usage("usage: %prog search [searchterm1 searchterm2] [--galaxy-tags galaxy_tag1,galaxy_tag2] [--platforms platform1,platform2] [--author username]")
|
||||
elif self.action == "setup":
|
||||
self.parser.set_usage("usage: %prog setup [options] source github_user github_repo secret")
|
||||
self.parser.add_option('--remove', dest='remove_id', default=None,
|
||||
help='Remove the integration matching the provided ID value. Use --list to see ID values.')
|
||||
self.parser.add_option('--list', dest="setup_list", action='store_true', default=False,
|
||||
help='List all of your integrations.')
|
||||
|
||||
# options that apply to more than one action
|
||||
if self.action != "init":
|
||||
if not self.action in ("delete","import","init","login","setup"):
|
||||
self.parser.add_option('-p', '--roles-path', dest='roles_path', default=C.DEFAULT_ROLES_PATH,
|
||||
help='The path to the directory containing your roles. '
|
||||
'The default is the roles_path configured in your '
|
||||
'ansible.cfg file (/etc/ansible/roles if not configured)')
|
||||
|
||||
if self.action in ("info","init","install","search"):
|
||||
self.parser.add_option('-s', '--server', dest='api_server', default="https://galaxy.ansible.com",
|
||||
if self.action in ("import","info","init","install","login","search","setup","delete"):
|
||||
self.parser.add_option('-s', '--server', dest='api_server', default=C.GALAXY_SERVER,
|
||||
help='The API server destination')
|
||||
self.parser.add_option('-c', '--ignore-certs', action='store_false', dest='validate_certs', default=True,
|
||||
self.parser.add_option('-c', '--ignore-certs', action='store_true', dest='ignore_certs', default=False,
|
||||
help='Ignore SSL certificate validation errors.')
|
||||
|
||||
if self.action in ("init","install"):
|
||||
self.parser.add_option('-f', '--force', dest='force', action='store_true', default=False,
|
||||
help='Force overwriting an existing role')
|
||||
|
||||
# get options, args and galaxy object
|
||||
self.options, self.args =self.parser.parse_args()
|
||||
display.verbosity = self.options.verbosity
|
||||
self.galaxy = Galaxy(self.options)
|
||||
@@ -120,15 +141,13 @@ class GalaxyCLI(CLI):
|
||||
return True
|
||||
|
||||
def run(self):
|
||||
|
||||
|
||||
super(GalaxyCLI, self).run()
|
||||
|
||||
# if not offline, get connect to galaxy api
|
||||
if self.action in ("info","install", "search") or (self.action == 'init' and not self.options.offline):
|
||||
api_server = self.options.api_server
|
||||
self.api = GalaxyAPI(self.galaxy, api_server)
|
||||
if not self.api:
|
||||
raise AnsibleError("The API server (%s) is not responding, please try again later." % api_server)
|
||||
if self.action in ("import","info","install","search","login","setup","delete") or \
|
||||
(self.action == 'init' and not self.options.offline):
|
||||
self.api = GalaxyAPI(self.galaxy)
|
||||
|
||||
self.execute()
|
||||
|
||||
@@ -188,7 +207,7 @@ class GalaxyCLI(CLI):
|
||||
"however it will reset any main.yml files that may have\n"
|
||||
"been modified there already." % role_path)
|
||||
|
||||
# create the default README.md
|
||||
# create default README.md
|
||||
if not os.path.exists(role_path):
|
||||
os.makedirs(role_path)
|
||||
readme_path = os.path.join(role_path, "README.md")
|
||||
@@ -196,9 +215,16 @@ class GalaxyCLI(CLI):
|
||||
f.write(self.galaxy.default_readme)
|
||||
f.close()
|
||||
|
||||
# create default .travis.yml
|
||||
travis = Environment().from_string(self.galaxy.default_travis).render()
|
||||
f = open(os.path.join(role_path, '.travis.yml'), 'w')
|
||||
f.write(travis)
|
||||
f.close()
|
||||
|
||||
for dir in GalaxyRole.ROLE_DIRS:
|
||||
dir_path = os.path.join(init_path, role_name, dir)
|
||||
main_yml_path = os.path.join(dir_path, 'main.yml')
|
||||
|
||||
# create the directory if it doesn't exist already
|
||||
if not os.path.exists(dir_path):
|
||||
os.makedirs(dir_path)
|
||||
@@ -234,6 +260,20 @@ class GalaxyCLI(CLI):
|
||||
f.write(rendered_meta)
|
||||
f.close()
|
||||
pass
|
||||
elif dir == "tests":
|
||||
# create tests/test.yml
|
||||
inject = dict(
|
||||
role_name = role_name
|
||||
)
|
||||
playbook = Environment().from_string(self.galaxy.default_test).render(inject)
|
||||
f = open(os.path.join(dir_path, 'test.yml'), 'w')
|
||||
f.write(playbook)
|
||||
f.close()
|
||||
|
||||
# create tests/inventory
|
||||
f = open(os.path.join(dir_path, 'inventory'), 'w')
|
||||
f.write('localhost')
|
||||
f.close()
|
||||
elif dir not in ('files','templates'):
|
||||
# just write a (mostly) empty YAML file for main.yml
|
||||
f = open(main_yml_path, 'w')
|
||||
@@ -325,7 +365,7 @@ class GalaxyCLI(CLI):
|
||||
|
||||
for role in required_roles:
|
||||
role = RoleRequirement.role_yaml_parse(role)
|
||||
display.debug('found role %s in yaml file' % str(role))
|
||||
display.vvv('found role %s in yaml file' % str(role))
|
||||
if 'name' not in role and 'scm' not in role:
|
||||
raise AnsibleError("Must specify name or src for role")
|
||||
roles_left.append(GalaxyRole(self.galaxy, **role))
|
||||
@@ -348,7 +388,7 @@ class GalaxyCLI(CLI):
|
||||
roles_left.append(GalaxyRole(self.galaxy, rname.strip()))
|
||||
|
||||
for role in roles_left:
|
||||
display.debug('Installing role %s ' % role.name)
|
||||
display.vvv('Installing role %s ' % role.name)
|
||||
# query the galaxy API for the role data
|
||||
|
||||
if role.install_info is not None and not force:
|
||||
@@ -458,21 +498,187 @@ class GalaxyCLI(CLI):
|
||||
return 0
|
||||
|
||||
def execute_search(self):
|
||||
|
||||
page_size = 1000
|
||||
search = None
|
||||
if len(self.args) > 1:
|
||||
raise AnsibleOptionsError("At most a single search term is allowed.")
|
||||
elif len(self.args) == 1:
|
||||
search = self.args.pop()
|
||||
|
||||
response = self.api.search_roles(search, self.options.platforms, self.options.tags)
|
||||
if len(self.args):
|
||||
terms = []
|
||||
for i in range(len(self.args)):
|
||||
terms.append(self.args.pop())
|
||||
search = '+'.join(terms[::-1])
|
||||
|
||||
if 'count' in response:
|
||||
display.display("Found %d roles matching your search:\n" % response['count'])
|
||||
if not search and not self.options.platforms and not self.options.tags and not self.options.author:
|
||||
raise AnsibleError("Invalid query. At least one search term, platform, galaxy tag or author must be provided.")
|
||||
|
||||
response = self.api.search_roles(search, platforms=self.options.platforms,
|
||||
tags=self.options.tags, author=self.options.author, page_size=page_size)
|
||||
|
||||
if response['count'] == 0:
|
||||
display.display("No roles match your search.", color=C.COLOR_ERROR)
|
||||
return True
|
||||
|
||||
data = ''
|
||||
if 'results' in response:
|
||||
for role in response['results']:
|
||||
data += self._display_role_info(role)
|
||||
|
||||
if response['count'] > page_size:
|
||||
data += ("\nFound %d roles matching your search. Showing first %s.\n" % (response['count'], page_size))
|
||||
else:
|
||||
data += ("\nFound %d roles matching your search:\n" % response['count'])
|
||||
|
||||
max_len = []
|
||||
for role in response['results']:
|
||||
max_len.append(len(role['username'] + '.' + role['name']))
|
||||
name_len = max(max_len)
|
||||
format_str = " %%-%ds %%s\n" % name_len
|
||||
data +='\n'
|
||||
data += (format_str % ("Name", "Description"))
|
||||
data += (format_str % ("----", "-----------"))
|
||||
for role in response['results']:
|
||||
data += (format_str % (role['username'] + '.' + role['name'],role['description']))
|
||||
|
||||
self.pager(data)
|
||||
|
||||
return True
|
||||
|
||||
def execute_login(self):
|
||||
"""
|
||||
Verify user's identify via Github and retreive an auth token from Galaxy.
|
||||
"""
|
||||
# Authenticate with github and retrieve a token
|
||||
if self.options.token is None:
|
||||
login = GalaxyLogin(self.galaxy)
|
||||
github_token = login.create_github_token()
|
||||
else:
|
||||
github_token = self.options.token
|
||||
|
||||
galaxy_response = self.api.authenticate(github_token)
|
||||
|
||||
if self.options.token is None:
|
||||
# Remove the token we created
|
||||
login.remove_github_token()
|
||||
|
||||
# Store the Galaxy token
|
||||
token = GalaxyToken()
|
||||
token.set(galaxy_response['token'])
|
||||
|
||||
display.display("Succesfully logged into Galaxy as %s" % galaxy_response['username'])
|
||||
return 0
|
||||
|
||||
def execute_import(self):
|
||||
"""
|
||||
Import a role into Galaxy
|
||||
"""
|
||||
|
||||
colors = {
|
||||
'INFO': 'normal',
|
||||
'WARNING': C.COLOR_WARN,
|
||||
'ERROR': C.COLOR_ERROR,
|
||||
'SUCCESS': C.COLOR_OK,
|
||||
'FAILED': C.COLOR_ERROR,
|
||||
}
|
||||
|
||||
if len(self.args) < 2:
|
||||
raise AnsibleError("Expected a github_username and github_repository. Use --help.")
|
||||
|
||||
github_repo = self.args.pop()
|
||||
github_user = self.args.pop()
|
||||
|
||||
if self.options.check_status:
|
||||
task = self.api.get_import_task(github_user=github_user, github_repo=github_repo)
|
||||
else:
|
||||
# Submit an import request
|
||||
task = self.api.create_import_task(github_user, github_repo, reference=self.options.reference)
|
||||
|
||||
if len(task) > 1:
|
||||
# found multiple roles associated with github_user/github_repo
|
||||
display.display("WARNING: More than one Galaxy role associated with Github repo %s/%s." % (github_user,github_repo),
|
||||
color='yellow')
|
||||
display.display("The following Galaxy roles are being updated:" + u'\n', color=C.COLOR_CHANGED)
|
||||
for t in task:
|
||||
display.display('%s.%s' % (t['summary_fields']['role']['namespace'],t['summary_fields']['role']['name']), color=C.COLOR_CHANGED)
|
||||
display.display(u'\n' + "To properly namespace this role, remove each of the above and re-import %s/%s from scratch" % (github_user,github_repo), color=C.COLOR_CHANGED)
|
||||
return 0
|
||||
# found a single role as expected
|
||||
display.display("Successfully submitted import request %d" % task[0]['id'])
|
||||
if not self.options.wait:
|
||||
display.display("Role name: %s" % task[0]['summary_fields']['role']['name'])
|
||||
display.display("Repo: %s/%s" % (task[0]['github_user'],task[0]['github_repo']))
|
||||
|
||||
if self.options.check_status or self.options.wait:
|
||||
# Get the status of the import
|
||||
msg_list = []
|
||||
finished = False
|
||||
while not finished:
|
||||
task = self.api.get_import_task(task_id=task[0]['id'])
|
||||
for msg in task[0]['summary_fields']['task_messages']:
|
||||
if msg['id'] not in msg_list:
|
||||
display.display(msg['message_text'], color=colors[msg['message_type']])
|
||||
msg_list.append(msg['id'])
|
||||
if task[0]['state'] in ['SUCCESS', 'FAILED']:
|
||||
finished = True
|
||||
else:
|
||||
time.sleep(10)
|
||||
|
||||
return 0
|
||||
|
||||
def execute_setup(self):
|
||||
"""
|
||||
Setup an integration from Github or Travis
|
||||
"""
|
||||
|
||||
if self.options.setup_list:
|
||||
# List existing integration secrets
|
||||
secrets = self.api.list_secrets()
|
||||
if len(secrets) == 0:
|
||||
# None found
|
||||
display.display("No integrations found.")
|
||||
return 0
|
||||
display.display(u'\n' + "ID Source Repo", color=C.COLOR_OK)
|
||||
display.display("---------- ---------- ----------", color=C.COLOR_OK)
|
||||
for secret in secrets:
|
||||
display.display("%-10s %-10s %s/%s" % (secret['id'], secret['source'], secret['github_user'],
|
||||
secret['github_repo']),color=C.COLOR_OK)
|
||||
return 0
|
||||
|
||||
if self.options.remove_id:
|
||||
# Remove a secret
|
||||
self.api.remove_secret(self.options.remove_id)
|
||||
display.display("Secret removed. Integrations using this secret will not longer work.", color=C.COLOR_OK)
|
||||
return 0
|
||||
|
||||
if len(self.args) < 4:
|
||||
raise AnsibleError("Missing one or more arguments. Expecting: source github_user github_repo secret")
|
||||
return 0
|
||||
|
||||
secret = self.args.pop()
|
||||
github_repo = self.args.pop()
|
||||
github_user = self.args.pop()
|
||||
source = self.args.pop()
|
||||
|
||||
resp = self.api.add_secret(source, github_user, github_repo, secret)
|
||||
display.display("Added integration for %s %s/%s" % (resp['source'], resp['github_user'], resp['github_repo']))
|
||||
|
||||
return 0
|
||||
|
||||
def execute_delete(self):
|
||||
"""
|
||||
Delete a role from galaxy.ansible.com
|
||||
"""
|
||||
|
||||
if len(self.args) < 2:
|
||||
raise AnsibleError("Missing one or more arguments. Expected: github_user github_repo")
|
||||
|
||||
github_repo = self.args.pop()
|
||||
github_user = self.args.pop()
|
||||
resp = self.api.delete_role(github_user, github_repo)
|
||||
|
||||
if len(resp['deleted_roles']) > 1:
|
||||
display.display("Deleted the following roles:")
|
||||
display.display("ID User Name")
|
||||
display.display("------ --------------- ----------")
|
||||
for role in resp['deleted_roles']:
|
||||
display.display("%-8s %-15s %s" % (role.id,role.namespace,role.name))
|
||||
|
||||
display.display(resp['status'])
|
||||
|
||||
return True
|
||||
|
||||
|
||||
@@ -30,6 +30,7 @@ from ansible.errors import AnsibleError, AnsibleOptionsError
|
||||
from ansible.executor.playbook_executor import PlaybookExecutor
|
||||
from ansible.inventory import Inventory
|
||||
from ansible.parsing.dataloader import DataLoader
|
||||
from ansible.playbook.play_context import PlayContext
|
||||
from ansible.utils.vars import load_extra_vars
|
||||
from ansible.vars import VariableManager
|
||||
|
||||
@@ -72,7 +73,7 @@ class PlaybookCLI(CLI):
|
||||
parser.add_option('--start-at-task', dest='start_at_task',
|
||||
help="start the playbook at the task matching this name")
|
||||
|
||||
self.options, self.args = parser.parse_args()
|
||||
self.options, self.args = parser.parse_args(self.args[1:])
|
||||
|
||||
|
||||
self.parser = parser
|
||||
@@ -152,18 +153,10 @@ class PlaybookCLI(CLI):
|
||||
for p in results:
|
||||
|
||||
display.display('\nplaybook: %s' % p['playbook'])
|
||||
i = 1
|
||||
for play in p['plays']:
|
||||
if play.name:
|
||||
playname = play.name
|
||||
else:
|
||||
playname = '#' + str(i)
|
||||
|
||||
msg = "\n PLAY: %s" % (playname)
|
||||
mytags = set()
|
||||
if self.options.listtags and play.tags:
|
||||
mytags = mytags.union(set(play.tags))
|
||||
msg += ' TAGS: [%s]' % (','.join(mytags))
|
||||
for idx, play in enumerate(p['plays']):
|
||||
msg = "\n play #%d (%s): %s" % (idx + 1, ','.join(play.hosts), play.name)
|
||||
mytags = set(play.tags)
|
||||
msg += '\tTAGS: [%s]' % (','.join(mytags))
|
||||
|
||||
if self.options.listhosts:
|
||||
playhosts = set(inventory.get_hosts(play.hosts))
|
||||
@@ -173,23 +166,40 @@ class PlaybookCLI(CLI):
|
||||
|
||||
display.display(msg)
|
||||
|
||||
all_tags = set()
|
||||
if self.options.listtags or self.options.listtasks:
|
||||
taskmsg = ' tasks:'
|
||||
taskmsg = ''
|
||||
if self.options.listtasks:
|
||||
taskmsg = ' tasks:\n'
|
||||
|
||||
all_vars = variable_manager.get_vars(loader=loader, play=play)
|
||||
play_context = PlayContext(play=play, options=self.options)
|
||||
for block in play.compile():
|
||||
block = block.filter_tagged_tasks(play_context, all_vars)
|
||||
if not block.has_tasks():
|
||||
continue
|
||||
|
||||
j = 1
|
||||
for task in block.block:
|
||||
taskmsg += "\n %s" % task
|
||||
if self.options.listtags and task.tags:
|
||||
taskmsg += " TAGS: [%s]" % ','.join(mytags.union(set(task.tags)))
|
||||
j = j + 1
|
||||
if task.action == 'meta':
|
||||
continue
|
||||
|
||||
all_tags.update(task.tags)
|
||||
if self.options.listtasks:
|
||||
cur_tags = list(mytags.union(set(task.tags)))
|
||||
cur_tags.sort()
|
||||
if task.name:
|
||||
taskmsg += " %s" % task.get_name()
|
||||
else:
|
||||
taskmsg += " %s" % task.action
|
||||
taskmsg += "\tTAGS: [%s]\n" % ', '.join(cur_tags)
|
||||
|
||||
if self.options.listtags:
|
||||
cur_tags = list(mytags.union(all_tags))
|
||||
cur_tags.sort()
|
||||
taskmsg += " TASK TAGS: [%s]\n" % ', '.join(cur_tags)
|
||||
|
||||
display.display(taskmsg)
|
||||
|
||||
i = i + 1
|
||||
return 0
|
||||
else:
|
||||
return results
|
||||
|
||||
@@ -64,18 +64,24 @@ class PullCLI(CLI):
|
||||
subset_opts=True,
|
||||
inventory_opts=True,
|
||||
module_opts=True,
|
||||
runas_prompt_opts=True,
|
||||
)
|
||||
|
||||
# options unique to pull
|
||||
self.parser.add_option('--purge', default=False, action='store_true', help='purge checkout after playbook run')
|
||||
self.parser.add_option('--purge', default=False, action='store_true',
|
||||
help='purge checkout after playbook run')
|
||||
self.parser.add_option('-o', '--only-if-changed', dest='ifchanged', default=False, action='store_true',
|
||||
help='only run the playbook if the repository has been updated')
|
||||
self.parser.add_option('-s', '--sleep', dest='sleep', default=None,
|
||||
help='sleep for random interval (between 0 and n number of seconds) before starting. This is a useful way to disperse git requests')
|
||||
self.parser.add_option('-f', '--force', dest='force', default=False, action='store_true',
|
||||
help='run the playbook even if the repository could not be updated')
|
||||
self.parser.add_option('-d', '--directory', dest='dest', default='~/.ansible/pull', help='directory to checkout repository to')
|
||||
self.parser.add_option('-U', '--url', dest='url', default=None, help='URL of the playbook repository')
|
||||
self.parser.add_option('-d', '--directory', dest='dest', default=None,
|
||||
help='directory to checkout repository to')
|
||||
self.parser.add_option('-U', '--url', dest='url', default=None,
|
||||
help='URL of the playbook repository')
|
||||
self.parser.add_option('--full', dest='fullclone', action='store_true',
|
||||
help='Do a full clone, instead of a shallow one.')
|
||||
self.parser.add_option('-C', '--checkout', dest='checkout',
|
||||
help='branch/tag/commit to checkout. ' 'Defaults to behavior of repository module.')
|
||||
self.parser.add_option('--accept-host-key', default=False, dest='accept_host_key', action='store_true',
|
||||
@@ -86,7 +92,13 @@ class PullCLI(CLI):
|
||||
help='verify GPG signature of checked out commit, if it fails abort running the playbook.'
|
||||
' This needs the corresponding VCS module to support such an operation')
|
||||
|
||||
self.options, self.args = self.parser.parse_args()
|
||||
self.options, self.args = self.parser.parse_args(self.args[1:])
|
||||
|
||||
if not self.options.dest:
|
||||
hostname = socket.getfqdn()
|
||||
# use a hostname dependent directory, in case of $HOME on nfs
|
||||
self.options.dest = os.path.join('~/.ansible/pull', hostname)
|
||||
self.options.dest = os.path.expandvars(os.path.expanduser(self.options.dest))
|
||||
|
||||
if self.options.sleep:
|
||||
try:
|
||||
@@ -119,7 +131,7 @@ class PullCLI(CLI):
|
||||
node = platform.node()
|
||||
host = socket.getfqdn()
|
||||
limit_opts = 'localhost,%s,127.0.0.1' % ','.join(set([host, node, host.split('.')[0], node.split('.')[0]]))
|
||||
base_opts = '-c local "%s"' % limit_opts
|
||||
base_opts = '-c local '
|
||||
if self.options.verbosity > 0:
|
||||
base_opts += ' -%s' % ''.join([ "v" for x in range(0, self.options.verbosity) ])
|
||||
|
||||
@@ -130,7 +142,7 @@ class PullCLI(CLI):
|
||||
else:
|
||||
inv_opts = self.options.inventory
|
||||
|
||||
#TODO: enable more repo modules hg/svn?
|
||||
#FIXME: enable more repo modules hg/svn?
|
||||
if self.options.module_name == 'git':
|
||||
repo_opts = "name=%s dest=%s" % (self.options.url, self.options.dest)
|
||||
if self.options.checkout:
|
||||
@@ -145,13 +157,17 @@ class PullCLI(CLI):
|
||||
if self.options.verify:
|
||||
repo_opts += ' verify_commit=yes'
|
||||
|
||||
if not self.options.fullclone:
|
||||
repo_opts += ' depth=1'
|
||||
|
||||
|
||||
path = module_loader.find_plugin(self.options.module_name)
|
||||
if path is None:
|
||||
raise AnsibleOptionsError(("module '%s' not found.\n" % self.options.module_name))
|
||||
|
||||
bin_path = os.path.dirname(os.path.abspath(sys.argv[0]))
|
||||
cmd = '%s/ansible -i "%s" %s -m %s -a "%s"' % (
|
||||
bin_path, inv_opts, base_opts, self.options.module_name, repo_opts
|
||||
cmd = '%s/ansible -i "%s" %s -m %s -a "%s" "%s"' % (
|
||||
bin_path, inv_opts, base_opts, self.options.module_name, repo_opts, limit_opts
|
||||
)
|
||||
|
||||
for ev in self.options.extra_vars:
|
||||
@@ -163,6 +179,8 @@ class PullCLI(CLI):
|
||||
time.sleep(self.options.sleep)
|
||||
|
||||
# RUN the Checkout command
|
||||
display.debug("running ansible with VCS module to checkout repo")
|
||||
display.vvvv('EXEC: %s' % cmd)
|
||||
rc, out, err = run_cmd(cmd, live=True)
|
||||
|
||||
if rc != 0:
|
||||
@@ -174,8 +192,7 @@ class PullCLI(CLI):
|
||||
display.display("Repository has not changed, quitting.")
|
||||
return 0
|
||||
|
||||
playbook = self.select_playbook(path)
|
||||
|
||||
playbook = self.select_playbook(self.options.dest)
|
||||
if playbook is None:
|
||||
raise AnsibleOptionsError("Could not find a playbook to run.")
|
||||
|
||||
@@ -187,16 +204,18 @@ class PullCLI(CLI):
|
||||
cmd += ' -i "%s"' % self.options.inventory
|
||||
for ev in self.options.extra_vars:
|
||||
cmd += ' -e "%s"' % ev
|
||||
if self.options.ask_sudo_pass:
|
||||
cmd += ' -K'
|
||||
if self.options.ask_sudo_pass or self.options.ask_su_pass or self.options.become_ask_pass:
|
||||
cmd += ' --ask-become-pass'
|
||||
if self.options.tags:
|
||||
cmd += ' -t "%s"' % self.options.tags
|
||||
if self.options.limit:
|
||||
cmd += ' -l "%s"' % self.options.limit
|
||||
if self.options.subset:
|
||||
cmd += ' -l "%s"' % self.options.subset
|
||||
|
||||
os.chdir(self.options.dest)
|
||||
|
||||
# RUN THE PLAYBOOK COMMAND
|
||||
display.debug("running ansible-playbook to do actual work")
|
||||
display.debug('EXEC: %s' % cmd)
|
||||
rc, out, err = run_cmd(cmd, live=True)
|
||||
|
||||
if self.options.purge:
|
||||
|
||||
@@ -69,7 +69,7 @@ class VaultCLI(CLI):
|
||||
elif self.action == "rekey":
|
||||
self.parser.set_usage("usage: %prog rekey [options] file_name")
|
||||
|
||||
self.options, self.args = self.parser.parse_args()
|
||||
self.options, self.args = self.parser.parse_args(self.args[1:])
|
||||
display.verbosity = self.options.verbosity
|
||||
|
||||
can_output = ['encrypt', 'decrypt']
|
||||
|
||||
@@ -120,19 +120,23 @@ DEFAULT_COW_WHITELIST = ['bud-frogs', 'bunny', 'cheese', 'daemon', 'default', 'd
|
||||
# sections in config file
|
||||
DEFAULTS='defaults'
|
||||
|
||||
# FIXME: add deprecation warning when these get set
|
||||
#### DEPRECATED VARS ####
|
||||
# use more sanely named 'inventory'
|
||||
DEPRECATED_HOST_LIST = get_config(p, DEFAULTS, 'hostfile', 'ANSIBLE_HOSTS', '/etc/ansible/hosts', ispath=True)
|
||||
# this is not used since 0.5 but people might still have in config
|
||||
DEFAULT_PATTERN = get_config(p, DEFAULTS, 'pattern', None, None)
|
||||
|
||||
# generally configurable things
|
||||
#### GENERALLY CONFIGURABLE THINGS ####
|
||||
DEFAULT_DEBUG = get_config(p, DEFAULTS, 'debug', 'ANSIBLE_DEBUG', False, boolean=True)
|
||||
DEFAULT_HOST_LIST = get_config(p, DEFAULTS,'inventory', 'ANSIBLE_INVENTORY', DEPRECATED_HOST_LIST, ispath=True)
|
||||
DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY', None, ispath=True)
|
||||
DEFAULT_ROLES_PATH = get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH', '/etc/ansible/roles', ispath=True)
|
||||
DEFAULT_REMOTE_TMP = get_config(p, DEFAULTS, 'remote_tmp', 'ANSIBLE_REMOTE_TEMP', '$HOME/.ansible/tmp')
|
||||
DEFAULT_MODULE_NAME = get_config(p, DEFAULTS, 'module_name', None, 'command')
|
||||
DEFAULT_PATTERN = get_config(p, DEFAULTS, 'pattern', None, '*')
|
||||
DEFAULT_FORKS = get_config(p, DEFAULTS, 'forks', 'ANSIBLE_FORKS', 5, integer=True)
|
||||
DEFAULT_MODULE_ARGS = get_config(p, DEFAULTS, 'module_args', 'ANSIBLE_MODULE_ARGS', '')
|
||||
DEFAULT_MODULE_LANG = get_config(p, DEFAULTS, 'module_lang', 'ANSIBLE_MODULE_LANG', 'en_US.UTF-8')
|
||||
DEFAULT_MODULE_LANG = get_config(p, DEFAULTS, 'module_lang', 'ANSIBLE_MODULE_LANG', os.getenv('LANG', 'en_US.UTF-8'))
|
||||
DEFAULT_TIMEOUT = get_config(p, DEFAULTS, 'timeout', 'ANSIBLE_TIMEOUT', 10, integer=True)
|
||||
DEFAULT_POLL_INTERVAL = get_config(p, DEFAULTS, 'poll_interval', 'ANSIBLE_POLL_INTERVAL', 15, integer=True)
|
||||
DEFAULT_REMOTE_USER = get_config(p, DEFAULTS, 'remote_user', 'ANSIBLE_REMOTE_USER', None)
|
||||
@@ -159,7 +163,7 @@ DEFAULT_VAR_COMPRESSION_LEVEL = get_config(p, DEFAULTS, 'var_compression_level',
|
||||
|
||||
# disclosure
|
||||
DEFAULT_NO_LOG = get_config(p, DEFAULTS, 'no_log', 'ANSIBLE_NO_LOG', False, boolean=True)
|
||||
DEFAULT_NO_TARGET_SYSLOG = get_config(p, DEFAULTS, 'no_target_syslog', 'ANSIBLE_NO_TARGET_SYSLOG', True, boolean=True)
|
||||
DEFAULT_NO_TARGET_SYSLOG = get_config(p, DEFAULTS, 'no_target_syslog', 'ANSIBLE_NO_TARGET_SYSLOG', False, boolean=True)
|
||||
|
||||
# selinux
|
||||
DEFAULT_SELINUX_SPECIAL_FS = get_config(p, 'selinux', 'special_context_filesystems', None, 'fuse, nfs, vboxsf, ramfs', islist=True)
|
||||
@@ -197,7 +201,7 @@ DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pa
|
||||
# the module takes both, bad things could happen.
|
||||
# In the future we should probably generalize this even further
|
||||
# (mapping of param: squash field)
|
||||
DEFAULT_SQUASH_ACTIONS = get_config(p, DEFAULTS, 'squash_actions', 'ANSIBLE_SQUASH_ACTIONS', "apt, yum, pkgng, zypper, dnf", islist=True)
|
||||
DEFAULT_SQUASH_ACTIONS = get_config(p, DEFAULTS, 'squash_actions', 'ANSIBLE_SQUASH_ACTIONS', "apt, dnf, package, pkgng, yum, zypper", islist=True)
|
||||
# paths
|
||||
DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS', '~/.ansible/plugins/action:/usr/share/ansible/plugins/action', ispath=True)
|
||||
DEFAULT_CACHE_PLUGIN_PATH = get_config(p, DEFAULTS, 'cache_plugins', 'ANSIBLE_CACHE_PLUGINS', '~/.ansible/plugins/cache:/usr/share/ansible/plugins/cache', ispath=True)
|
||||
@@ -255,12 +259,25 @@ ACCELERATE_MULTI_KEY = get_config(p, 'accelerate', 'accelerate_multi_k
|
||||
PARAMIKO_PTY = get_config(p, 'paramiko_connection', 'pty', 'ANSIBLE_PARAMIKO_PTY', True, boolean=True)
|
||||
|
||||
# galaxy related
|
||||
DEFAULT_GALAXY_URI = get_config(p, 'galaxy', 'server_uri', 'ANSIBLE_GALAXY_SERVER_URI', 'https://galaxy.ansible.com')
|
||||
GALAXY_SERVER = get_config(p, 'galaxy', 'server', 'ANSIBLE_GALAXY_SERVER', 'https://galaxy.ansible.com')
|
||||
GALAXY_IGNORE_CERTS = get_config(p, 'galaxy', 'ignore_certs', 'ANSIBLE_GALAXY_IGNORE', False, boolean=True)
|
||||
# this can be configured to blacklist SCMS but cannot add new ones unless the code is also updated
|
||||
GALAXY_SCMS = get_config(p, 'galaxy', 'scms', 'ANSIBLE_GALAXY_SCMS', 'git, hg', islist=True)
|
||||
|
||||
# characters included in auto-generated passwords
|
||||
DEFAULT_PASSWORD_CHARS = ascii_letters + digits + ".,:-_"
|
||||
STRING_TYPE_FILTERS = get_config(p, 'jinja2', 'dont_type_filters', 'ANSIBLE_STRING_TYPE_FILTERS', ['string', 'to_json', 'to_nice_json', 'to_yaml', 'ppretty', 'json'], islist=True )
|
||||
|
||||
# colors
|
||||
COLOR_VERBOSE = get_config(p, 'colors', 'verbose', 'ANSIBLE_COLOR_VERBOSE', 'blue')
|
||||
COLOR_WARN = get_config(p, 'colors', 'warn', 'ANSIBLE_COLOR_WARN', 'bright purple')
|
||||
COLOR_ERROR = get_config(p, 'colors', 'error', 'ANSIBLE_COLOR_ERROR', 'red')
|
||||
COLOR_DEBUG = get_config(p, 'colors', 'debug', 'ANSIBLE_COLOR_DEBUG', 'dark gray')
|
||||
COLOR_DEPRECATE = get_config(p, 'colors', 'deprecate', 'ANSIBLE_COLOR_DEPRECATE', 'purple')
|
||||
COLOR_SKIP = get_config(p, 'colors', 'skip', 'ANSIBLE_COLOR_SKIP', 'cyan')
|
||||
COLOR_UNREACHABLE = get_config(p, 'colors', 'unreachable', 'ANSIBLE_COLOR_UNREACHABLE', 'bright red')
|
||||
COLOR_OK = get_config(p, 'colors', 'ok', 'ANSIBLE_COLOR_OK', 'green')
|
||||
COLOR_CHANGED = get_config(p, 'colors', 'ok', 'ANSIBLE_COLOR_CHANGED', 'yellow')
|
||||
|
||||
# non-configurable things
|
||||
MODULE_REQUIRE_ARGS = ['command', 'shell', 'raw', 'script']
|
||||
|
||||
@@ -44,7 +44,7 @@ class AnsibleError(Exception):
|
||||
which should be returned by the DataLoader() class.
|
||||
'''
|
||||
|
||||
def __init__(self, message, obj=None, show_content=True):
|
||||
def __init__(self, message="", obj=None, show_content=True):
|
||||
# we import this here to prevent an import loop problem,
|
||||
# since the objects code also imports ansible.errors
|
||||
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject
|
||||
@@ -54,9 +54,9 @@ class AnsibleError(Exception):
|
||||
if obj and isinstance(obj, AnsibleBaseYAMLObject):
|
||||
extended_error = self._get_extended_error()
|
||||
if extended_error:
|
||||
self.message = 'ERROR! %s\n\n%s' % (message, to_str(extended_error))
|
||||
self.message = '%s\n\n%s' % (to_str(message), to_str(extended_error))
|
||||
else:
|
||||
self.message = 'ERROR! %s' % message
|
||||
self.message = '%s' % to_str(message)
|
||||
|
||||
def __str__(self):
|
||||
return self.message
|
||||
|
||||
@@ -39,6 +39,7 @@ REPLACER_WINDOWS = "# POWERSHELL_COMMON"
|
||||
REPLACER_WINARGS = "<<INCLUDE_ANSIBLE_MODULE_WINDOWS_ARGS>>"
|
||||
REPLACER_JSONARGS = "<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>"
|
||||
REPLACER_VERSION = "\"<<ANSIBLE_VERSION>>\""
|
||||
REPLACER_SELINUX = "<<SELINUX_SPECIAL_FILESYSTEMS>>"
|
||||
|
||||
# We could end up writing out parameters with unicode characters so we need to
|
||||
# specify an encoding for the python source file
|
||||
@@ -172,6 +173,7 @@ def modify_module(module_path, module_args, task_vars=dict(), strip_comments=Fal
|
||||
module_data = module_data.replace(REPLACER_COMPLEX, python_repred_args)
|
||||
module_data = module_data.replace(REPLACER_WINARGS, module_args_json)
|
||||
module_data = module_data.replace(REPLACER_JSONARGS, module_args_json)
|
||||
module_data = module_data.replace(REPLACER_SELINUX, ','.join(C.DEFAULT_SELINUX_SPECIAL_FS))
|
||||
|
||||
if module_style == 'new':
|
||||
facility = C.DEFAULT_SYSLOG_FACILITY
|
||||
@@ -200,4 +202,3 @@ def modify_module(module_path, module_args, task_vars=dict(), strip_comments=Fal
|
||||
module_data = b"\n".join(lines)
|
||||
|
||||
return (module_data, module_style, shebang)
|
||||
|
||||
|
||||
@@ -49,6 +49,7 @@ class HostState:
|
||||
self.cur_rescue_task = 0
|
||||
self.cur_always_task = 0
|
||||
self.cur_role = None
|
||||
self.cur_dep_chain = None
|
||||
self.run_state = PlayIterator.ITERATING_SETUP
|
||||
self.fail_state = PlayIterator.FAILED_NONE
|
||||
self.pending_setup = False
|
||||
@@ -57,14 +58,32 @@ class HostState:
|
||||
self.always_child_state = None
|
||||
|
||||
def __repr__(self):
|
||||
return "HOST STATE: block=%d, task=%d, rescue=%d, always=%d, role=%s, run_state=%d, fail_state=%d, pending_setup=%s, tasks child state? %s, rescue child state? %s, always child state? %s" % (
|
||||
def _run_state_to_string(n):
|
||||
states = ["ITERATING_SETUP", "ITERATING_TASKS", "ITERATING_RESCUE", "ITERATING_ALWAYS", "ITERATING_COMPLETE"]
|
||||
try:
|
||||
return states[n]
|
||||
except IndexError:
|
||||
return "UNKNOWN STATE"
|
||||
|
||||
def _failed_state_to_string(n):
|
||||
states = {1:"FAILED_SETUP", 2:"FAILED_TASKS", 4:"FAILED_RESCUE", 8:"FAILED_ALWAYS"}
|
||||
if n == 0:
|
||||
return "FAILED_NONE"
|
||||
else:
|
||||
ret = []
|
||||
for i in (1, 2, 4, 8):
|
||||
if n & i:
|
||||
ret.append(states[i])
|
||||
return "|".join(ret)
|
||||
|
||||
return "HOST STATE: block=%d, task=%d, rescue=%d, always=%d, role=%s, run_state=%s, fail_state=%s, pending_setup=%s, tasks child state? %s, rescue child state? %s, always child state? %s" % (
|
||||
self.cur_block,
|
||||
self.cur_regular_task,
|
||||
self.cur_rescue_task,
|
||||
self.cur_always_task,
|
||||
self.cur_role,
|
||||
self.run_state,
|
||||
self.fail_state,
|
||||
_run_state_to_string(self.run_state),
|
||||
_failed_state_to_string(self.fail_state),
|
||||
self.pending_setup,
|
||||
self.tasks_child_state,
|
||||
self.rescue_child_state,
|
||||
@@ -84,6 +103,8 @@ class HostState:
|
||||
new_state.run_state = self.run_state
|
||||
new_state.fail_state = self.fail_state
|
||||
new_state.pending_setup = self.pending_setup
|
||||
if self.cur_dep_chain is not None:
|
||||
new_state.cur_dep_chain = self.cur_dep_chain[:]
|
||||
if self.tasks_child_state is not None:
|
||||
new_state.tasks_child_state = self.tasks_child_state.copy()
|
||||
if self.rescue_child_state is not None:
|
||||
@@ -119,30 +140,35 @@ class PlayIterator:
|
||||
self._blocks.append(new_block)
|
||||
|
||||
self._host_states = {}
|
||||
start_at_matched = False
|
||||
for host in inventory.get_hosts(self._play.hosts):
|
||||
self._host_states[host.name] = HostState(blocks=self._blocks)
|
||||
# if the host's name is in the variable manager's fact cache, then set
|
||||
# its _gathered_facts flag to true for smart gathering tests later
|
||||
if host.name in variable_manager._fact_cache:
|
||||
host._gathered_facts = True
|
||||
# if we're looking to start at a specific task, iterate through
|
||||
# the tasks for this host until we find the specified task
|
||||
if play_context.start_at_task is not None and not start_at_done:
|
||||
while True:
|
||||
(s, task) = self.get_next_task_for_host(host, peek=True)
|
||||
if s.run_state == self.ITERATING_COMPLETE:
|
||||
break
|
||||
if task.name == play_context.start_at_task or fnmatch.fnmatch(task.name, play_context.start_at_task) or \
|
||||
task.get_name() == play_context.start_at_task or fnmatch.fnmatch(task.get_name(), play_context.start_at_task):
|
||||
# we have our match, so clear the start_at_task field on the
|
||||
# play context to flag that we've started at a task (and future
|
||||
# plays won't try to advance)
|
||||
play_context.start_at_task = None
|
||||
break
|
||||
else:
|
||||
self.get_next_task_for_host(host)
|
||||
# finally, reset the host's state to ITERATING_SETUP
|
||||
self._host_states[host.name].run_state = self.ITERATING_SETUP
|
||||
self._host_states[host.name] = HostState(blocks=self._blocks)
|
||||
# if the host's name is in the variable manager's fact cache, then set
|
||||
# its _gathered_facts flag to true for smart gathering tests later
|
||||
if host.name in variable_manager._fact_cache:
|
||||
host._gathered_facts = True
|
||||
# if we're looking to start at a specific task, iterate through
|
||||
# the tasks for this host until we find the specified task
|
||||
if play_context.start_at_task is not None and not start_at_done:
|
||||
while True:
|
||||
(s, task) = self.get_next_task_for_host(host, peek=True)
|
||||
if s.run_state == self.ITERATING_COMPLETE:
|
||||
break
|
||||
if task.name == play_context.start_at_task or fnmatch.fnmatch(task.name, play_context.start_at_task) or \
|
||||
task.get_name() == play_context.start_at_task or fnmatch.fnmatch(task.get_name(), play_context.start_at_task):
|
||||
start_at_matched = True
|
||||
break
|
||||
else:
|
||||
self.get_next_task_for_host(host)
|
||||
|
||||
# finally, reset the host's state to ITERATING_SETUP
|
||||
self._host_states[host.name].run_state = self.ITERATING_SETUP
|
||||
|
||||
if start_at_matched:
|
||||
# we have our match, so clear the start_at_task field on the
|
||||
# play context to flag that we've started at a task (and future
|
||||
# plays won't try to advance)
|
||||
play_context.start_at_task = None
|
||||
|
||||
# Extend the play handlers list to include the handlers defined in roles
|
||||
self._play.handlers.extend(play.compile_roles_handlers())
|
||||
@@ -189,13 +215,21 @@ class PlayIterator:
|
||||
s.pending_setup = False
|
||||
|
||||
if not task:
|
||||
old_s = s
|
||||
(s, task) = self._get_next_task_from_state(s, peek=peek)
|
||||
|
||||
def _roles_are_different(ra, rb):
|
||||
if ra != rb:
|
||||
return True
|
||||
else:
|
||||
return old_s.cur_dep_chain != task._block._dep_chain
|
||||
|
||||
if task and task._role:
|
||||
# if we had a current role, mark that role as completed
|
||||
if s.cur_role and task._role != s.cur_role and host.name in s.cur_role._had_task_run and not peek:
|
||||
if s.cur_role and _roles_are_different(task._role, s.cur_role) and host.name in s.cur_role._had_task_run and not peek:
|
||||
s.cur_role._completed[host.name] = True
|
||||
s.cur_role = task._role
|
||||
s.cur_dep_chain = task._block._dep_chain
|
||||
|
||||
if not peek:
|
||||
self._host_states[host.name] = s
|
||||
@@ -324,13 +358,21 @@ class PlayIterator:
|
||||
state.tasks_child_state = self._set_failed_state(state.tasks_child_state)
|
||||
else:
|
||||
state.fail_state |= self.FAILED_TASKS
|
||||
state.run_state = self.ITERATING_RESCUE
|
||||
if state._blocks[state.cur_block].rescue:
|
||||
state.run_state = self.ITERATING_RESCUE
|
||||
elif state._blocks[state.cur_block].always:
|
||||
state.run_state = self.ITERATING_ALWAYS
|
||||
else:
|
||||
state.run_state = self.ITERATING_COMPLETE
|
||||
elif state.run_state == self.ITERATING_RESCUE:
|
||||
if state.rescue_child_state is not None:
|
||||
state.rescue_child_state = self._set_failed_state(state.rescue_child_state)
|
||||
else:
|
||||
state.fail_state |= self.FAILED_RESCUE
|
||||
state.run_state = self.ITERATING_ALWAYS
|
||||
if state._blocks[state.cur_block].always:
|
||||
state.run_state = self.ITERATING_ALWAYS
|
||||
else:
|
||||
state.run_state = self.ITERATING_COMPLETE
|
||||
elif state.run_state == self.ITERATING_ALWAYS:
|
||||
if state.always_child_state is not None:
|
||||
state.always_child_state = self._set_failed_state(state.always_child_state)
|
||||
@@ -347,6 +389,28 @@ class PlayIterator:
|
||||
def get_failed_hosts(self):
|
||||
return dict((host, True) for (host, state) in iteritems(self._host_states) if state.run_state == self.ITERATING_COMPLETE and state.fail_state != self.FAILED_NONE)
|
||||
|
||||
def _check_failed_state(self, state):
|
||||
if state is None:
|
||||
return False
|
||||
elif state.run_state == self.ITERATING_TASKS and self._check_failed_state(state.tasks_child_state):
|
||||
return True
|
||||
elif state.run_state == self.ITERATING_RESCUE and self._check_failed_state(state.rescue_child_state):
|
||||
return True
|
||||
elif state.run_state == self.ITERATING_ALWAYS and self._check_failed_state(state.always_child_state):
|
||||
return True
|
||||
elif state.run_state == self.ITERATING_COMPLETE and state.fail_state != self.FAILED_NONE:
|
||||
if state.run_state == self.ITERATING_RESCUE and state.fail_state&self.FAILED_RESCUE == 0:
|
||||
return False
|
||||
elif state.run_state == self.ITERATING_ALWAYS and state.fail_state&self.FAILED_ALWAYS == 0:
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
return False
|
||||
|
||||
def is_failed(self, host):
|
||||
s = self.get_host_state(host)
|
||||
return self._check_failed_state(s)
|
||||
|
||||
def get_original_task(self, host, task):
|
||||
'''
|
||||
Finds the task in the task list which matches the UUID of the given task.
|
||||
@@ -396,7 +460,8 @@ class PlayIterator:
|
||||
return None
|
||||
|
||||
def _insert_tasks_into_state(self, state, task_list):
|
||||
if state.fail_state != self.FAILED_NONE:
|
||||
# if we've failed at all, or if the task list is empty, just return the current state
|
||||
if state.fail_state != self.FAILED_NONE and state.run_state not in (self.ITERATING_RESCUE, self.ITERATING_ALWAYS) or not task_list:
|
||||
return state
|
||||
|
||||
if state.run_state == self.ITERATING_TASKS:
|
||||
|
||||
@@ -31,8 +31,6 @@ from ansible.executor.task_queue_manager import TaskQueueManager
|
||||
from ansible.playbook import Playbook
|
||||
from ansible.template import Templar
|
||||
|
||||
from ansible.utils.color import colorize, hostcolor
|
||||
from ansible.utils.encrypt import do_encrypt
|
||||
from ansible.utils.unicode import to_unicode
|
||||
|
||||
try:
|
||||
@@ -83,6 +81,10 @@ class PlaybookExecutor:
|
||||
if self._tqm is None: # we are doing a listing
|
||||
entry = {'playbook': playbook_path}
|
||||
entry['plays'] = []
|
||||
else:
|
||||
# make sure the tqm has callbacks loaded
|
||||
self._tqm.load_callbacks()
|
||||
self._tqm.send_callback('v2_playbook_on_start', pb)
|
||||
|
||||
i = 1
|
||||
plays = pb.get_plays()
|
||||
@@ -108,10 +110,12 @@ class PlaybookExecutor:
|
||||
salt_size = var.get("salt_size", None)
|
||||
salt = var.get("salt", None)
|
||||
|
||||
if vname not in play.vars:
|
||||
if vname not in self._variable_manager.extra_vars:
|
||||
if self._tqm:
|
||||
self._tqm.send_callback('v2_playbook_on_vars_prompt', vname, private, prompt, encrypt, confirm, salt_size, salt, default)
|
||||
play.vars[vname] = self._do_var_prompt(vname, private, prompt, encrypt, confirm, salt_size, salt, default)
|
||||
play.vars[vname] = display.do_var_prompt(vname, private, prompt, encrypt, confirm, salt_size, salt, default)
|
||||
else: # we are either in --list-<option> or syntax check
|
||||
play.vars[vname] = default
|
||||
|
||||
# Create a temporary copy of the play here, so we can run post_validate
|
||||
# on it without the templating changes affecting the original object.
|
||||
@@ -128,8 +132,6 @@ class PlaybookExecutor:
|
||||
entry['plays'].append(new_play)
|
||||
|
||||
else:
|
||||
# make sure the tqm has callbacks loaded
|
||||
self._tqm.load_callbacks()
|
||||
self._tqm._unreachable_hosts.update(self._unreachable_hosts)
|
||||
|
||||
# we are actually running plays
|
||||
@@ -149,9 +151,7 @@ class PlaybookExecutor:
|
||||
# conditions are met, we break out, otherwise we only break out if the entire
|
||||
# batch failed
|
||||
failed_hosts_count = len(self._tqm._failed_hosts) + len(self._tqm._unreachable_hosts)
|
||||
if new_play.any_errors_fatal and failed_hosts_count > 0:
|
||||
break
|
||||
elif new_play.max_fail_percentage is not None and \
|
||||
if new_play.max_fail_percentage is not None and \
|
||||
int((new_play.max_fail_percentage)/100.0 * len(batch)) > int((len(batch) - failed_hosts_count) / len(batch) * 100.0):
|
||||
break
|
||||
elif len(batch) == failed_hosts_count:
|
||||
@@ -171,6 +171,10 @@ class PlaybookExecutor:
|
||||
if entry:
|
||||
entrylist.append(entry) # per playbook
|
||||
|
||||
# send the stats callback for this playbook
|
||||
if self._tqm is not None:
|
||||
self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats)
|
||||
|
||||
# if the last result wasn't zero, break out of the playbook file name loop
|
||||
if result != 0:
|
||||
break
|
||||
@@ -186,35 +190,6 @@ class PlaybookExecutor:
|
||||
display.display("No issues encountered")
|
||||
return result
|
||||
|
||||
# TODO: this stat summary stuff should be cleaned up and moved
|
||||
# to a new method, if it even belongs here...
|
||||
display.banner("PLAY RECAP")
|
||||
|
||||
hosts = sorted(self._tqm._stats.processed.keys())
|
||||
for h in hosts:
|
||||
t = self._tqm._stats.summarize(h)
|
||||
|
||||
display.display(u"%s : %s %s %s %s" % (
|
||||
hostcolor(h, t),
|
||||
colorize(u'ok', t['ok'], 'green'),
|
||||
colorize(u'changed', t['changed'], 'yellow'),
|
||||
colorize(u'unreachable', t['unreachable'], 'red'),
|
||||
colorize(u'failed', t['failures'], 'red')),
|
||||
screen_only=True
|
||||
)
|
||||
|
||||
display.display(u"%s : %s %s %s %s" % (
|
||||
hostcolor(h, t, False),
|
||||
colorize(u'ok', t['ok'], None),
|
||||
colorize(u'changed', t['changed'], None),
|
||||
colorize(u'unreachable', t['unreachable'], None),
|
||||
colorize(u'failed', t['failures'], None)),
|
||||
log_only=True
|
||||
)
|
||||
|
||||
display.display("", screen_only=True)
|
||||
# END STATS STUFF
|
||||
|
||||
return result
|
||||
|
||||
def _cleanup(self, signum=None, framenum=None):
|
||||
@@ -258,48 +233,3 @@ class PlaybookExecutor:
|
||||
|
||||
return serialized_batches
|
||||
|
||||
def _do_var_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
|
||||
|
||||
if sys.__stdin__.isatty():
|
||||
if prompt and default is not None:
|
||||
msg = "%s [%s]: " % (prompt, default)
|
||||
elif prompt:
|
||||
msg = "%s: " % prompt
|
||||
else:
|
||||
msg = 'input for %s: ' % varname
|
||||
|
||||
def do_prompt(prompt, private):
|
||||
if sys.stdout.encoding:
|
||||
msg = prompt.encode(sys.stdout.encoding)
|
||||
else:
|
||||
# when piping the output, or at other times when stdout
|
||||
# may not be the standard file descriptor, the stdout
|
||||
# encoding may not be set, so default to something sane
|
||||
msg = prompt.encode(locale.getpreferredencoding())
|
||||
if private:
|
||||
return getpass.getpass(msg)
|
||||
return raw_input(msg)
|
||||
|
||||
if confirm:
|
||||
while True:
|
||||
result = do_prompt(msg, private)
|
||||
second = do_prompt("confirm " + msg, private)
|
||||
if result == second:
|
||||
break
|
||||
display.display("***** VALUES ENTERED DO NOT MATCH ****")
|
||||
else:
|
||||
result = do_prompt(msg, private)
|
||||
else:
|
||||
result = None
|
||||
display.warning("Not prompting as we are not in interactive mode")
|
||||
|
||||
# if result is false and default is not None
|
||||
if not result and default is not None:
|
||||
result = default
|
||||
|
||||
if encrypt:
|
||||
result = do_encrypt(result, encrypt, salt_size, salt)
|
||||
|
||||
# handle utf-8 chars
|
||||
result = to_unicode(result, errors='strict')
|
||||
return result
|
||||
|
||||
@@ -58,7 +58,7 @@ class ResultProcess(multiprocessing.Process):
|
||||
|
||||
def _send_result(self, result):
|
||||
debug(u"sending result: %s" % ([text_type(x) for x in result],))
|
||||
self._final_q.put(result, block=False)
|
||||
self._final_q.put(result)
|
||||
debug("done sending result")
|
||||
|
||||
def _read_worker_result(self):
|
||||
@@ -73,7 +73,7 @@ class ResultProcess(multiprocessing.Process):
|
||||
try:
|
||||
if not rslt_q.empty():
|
||||
debug("worker %d has data to read" % self._cur_worker)
|
||||
result = rslt_q.get(block=False)
|
||||
result = rslt_q.get()
|
||||
debug("got a result from worker %d: %s" % (self._cur_worker, result))
|
||||
break
|
||||
except queue.Empty:
|
||||
@@ -101,7 +101,7 @@ class ResultProcess(multiprocessing.Process):
|
||||
try:
|
||||
result = self._read_worker_result()
|
||||
if result is None:
|
||||
time.sleep(0.01)
|
||||
time.sleep(0.0001)
|
||||
continue
|
||||
|
||||
clean_copy = strip_internal_keys(result._result)
|
||||
@@ -110,7 +110,7 @@ class ResultProcess(multiprocessing.Process):
|
||||
|
||||
# if this task is registering a result, do it now
|
||||
if result._task.register:
|
||||
self._send_result(('register_host_var', result._host, result._task.register, clean_copy))
|
||||
self._send_result(('register_host_var', result._host, result._task, clean_copy))
|
||||
|
||||
# send callbacks, execute other options based on the result status
|
||||
# TODO: this should all be cleaned up and probably moved to a sub-function.
|
||||
@@ -142,8 +142,6 @@ class ResultProcess(multiprocessing.Process):
|
||||
# notifies all other threads
|
||||
for notify in result_item['_ansible_notify']:
|
||||
self._send_result(('notify_handler', result, notify))
|
||||
# now remove the notify field from the results, as its no longer needed
|
||||
result_item.pop('_ansible_notify')
|
||||
|
||||
if 'add_host' in result_item:
|
||||
# this task added a new host (add_host module)
|
||||
|
||||
@@ -59,12 +59,18 @@ class WorkerProcess(multiprocessing.Process):
|
||||
for reading later.
|
||||
'''
|
||||
|
||||
def __init__(self, tqm, main_q, rslt_q, loader):
|
||||
def __init__(self, rslt_q, task_vars, host, task, play_context, loader, variable_manager, shared_loader_obj):
|
||||
|
||||
super(WorkerProcess, self).__init__()
|
||||
# takes a task queue manager as the sole param:
|
||||
self._main_q = main_q
|
||||
self._rslt_q = rslt_q
|
||||
self._loader = loader
|
||||
self._rslt_q = rslt_q
|
||||
self._task_vars = task_vars
|
||||
self._host = host
|
||||
self._task = task
|
||||
self._play_context = play_context
|
||||
self._loader = loader
|
||||
self._variable_manager = variable_manager
|
||||
self._shared_loader_obj = shared_loader_obj
|
||||
|
||||
# dupe stdin, if we have one
|
||||
self._new_stdin = sys.stdin
|
||||
@@ -82,8 +88,6 @@ class WorkerProcess(multiprocessing.Process):
|
||||
# couldn't get stdin's fileno, so we just carry on
|
||||
pass
|
||||
|
||||
super(WorkerProcess, self).__init__()
|
||||
|
||||
def run(self):
|
||||
'''
|
||||
Called when the process is started, and loops indefinitely
|
||||
@@ -97,72 +101,45 @@ class WorkerProcess(multiprocessing.Process):
|
||||
if HAS_ATFORK:
|
||||
atfork()
|
||||
|
||||
while True:
|
||||
task = None
|
||||
try:
|
||||
debug("waiting for a message...")
|
||||
(host, task, basedir, zip_vars, hostvars, compressed_vars, play_context, shared_loader_obj) = self._main_q.get()
|
||||
try:
|
||||
# execute the task and build a TaskResult from the result
|
||||
debug("running TaskExecutor() for %s/%s" % (self._host, self._task))
|
||||
executor_result = TaskExecutor(
|
||||
self._host,
|
||||
self._task,
|
||||
self._task_vars,
|
||||
self._play_context,
|
||||
self._new_stdin,
|
||||
self._loader,
|
||||
self._shared_loader_obj,
|
||||
).run()
|
||||
|
||||
if compressed_vars:
|
||||
job_vars = json.loads(zlib.decompress(zip_vars))
|
||||
else:
|
||||
job_vars = zip_vars
|
||||
job_vars['hostvars'] = hostvars
|
||||
debug("done running TaskExecutor() for %s/%s" % (self._host, self._task))
|
||||
self._host.vars = dict()
|
||||
self._host.groups = []
|
||||
task_result = TaskResult(self._host, self._task, executor_result)
|
||||
|
||||
debug("there's work to be done! got a task/handler to work on: %s" % task)
|
||||
# put the result on the result queue
|
||||
debug("sending task result")
|
||||
self._rslt_q.put(task_result)
|
||||
debug("done sending task result")
|
||||
|
||||
# because the task queue manager starts workers (forks) before the
|
||||
# playbook is loaded, set the basedir of the loader inherted by
|
||||
# this fork now so that we can find files correctly
|
||||
self._loader.set_basedir(basedir)
|
||||
except AnsibleConnectionFailure:
|
||||
self._host.vars = dict()
|
||||
self._host.groups = []
|
||||
task_result = TaskResult(self._host, self._task, dict(unreachable=True))
|
||||
self._rslt_q.put(task_result, block=False)
|
||||
|
||||
# Serializing/deserializing tasks does not preserve the loader attribute,
|
||||
# since it is passed to the worker during the forking of the process and
|
||||
# would be wasteful to serialize. So we set it here on the task now, and
|
||||
# the task handles updating parent/child objects as needed.
|
||||
task.set_loader(self._loader)
|
||||
|
||||
# execute the task and build a TaskResult from the result
|
||||
debug("running TaskExecutor() for %s/%s" % (host, task))
|
||||
executor_result = TaskExecutor(
|
||||
host,
|
||||
task,
|
||||
job_vars,
|
||||
play_context,
|
||||
self._new_stdin,
|
||||
self._loader,
|
||||
shared_loader_obj,
|
||||
).run()
|
||||
debug("done running TaskExecutor() for %s/%s" % (host, task))
|
||||
task_result = TaskResult(host, task, executor_result)
|
||||
|
||||
# put the result on the result queue
|
||||
debug("sending task result")
|
||||
self._rslt_q.put(task_result)
|
||||
debug("done sending task result")
|
||||
|
||||
except queue.Empty:
|
||||
pass
|
||||
except AnsibleConnectionFailure:
|
||||
except Exception as e:
|
||||
if not isinstance(e, (IOError, EOFError, KeyboardInterrupt)) or isinstance(e, TemplateNotFound):
|
||||
try:
|
||||
if task:
|
||||
task_result = TaskResult(host, task, dict(unreachable=True))
|
||||
self._rslt_q.put(task_result, block=False)
|
||||
self._host.vars = dict()
|
||||
self._host.groups = []
|
||||
task_result = TaskResult(self._host, self._task, dict(failed=True, exception=traceback.format_exc(), stdout=''))
|
||||
self._rslt_q.put(task_result, block=False)
|
||||
except:
|
||||
break
|
||||
except Exception as e:
|
||||
if isinstance(e, (IOError, EOFError, KeyboardInterrupt)) and not isinstance(e, TemplateNotFound):
|
||||
break
|
||||
else:
|
||||
try:
|
||||
if task:
|
||||
task_result = TaskResult(host, task, dict(failed=True, exception=traceback.format_exc(), stdout=''))
|
||||
self._rslt_q.put(task_result, block=False)
|
||||
except:
|
||||
debug("WORKER EXCEPTION: %s" % e)
|
||||
debug("WORKER EXCEPTION: %s" % traceback.format_exc())
|
||||
break
|
||||
debug("WORKER EXCEPTION: %s" % e)
|
||||
debug("WORKER EXCEPTION: %s" % traceback.format_exc())
|
||||
|
||||
debug("WORKER PROCESS EXITING")
|
||||
|
||||
|
||||
|
||||
@@ -35,7 +35,7 @@ from ansible.template import Templar
|
||||
from ansible.utils.encrypt import key_for_hostname
|
||||
from ansible.utils.listify import listify_lookup_plugin_terms
|
||||
from ansible.utils.unicode import to_unicode
|
||||
from ansible.vars.unsafe_proxy import UnsafeProxy
|
||||
from ansible.vars.unsafe_proxy import UnsafeProxy, wrap_var
|
||||
|
||||
try:
|
||||
from __main__ import display
|
||||
@@ -67,6 +67,7 @@ class TaskExecutor:
|
||||
self._new_stdin = new_stdin
|
||||
self._loader = loader
|
||||
self._shared_loader_obj = shared_loader_obj
|
||||
self._connection = None
|
||||
|
||||
def run(self):
|
||||
'''
|
||||
@@ -145,7 +146,7 @@ class TaskExecutor:
|
||||
except AttributeError:
|
||||
pass
|
||||
except Exception as e:
|
||||
display.debug("error closing connection: %s" % to_unicode(e))
|
||||
display.debug(u"error closing connection: %s" % to_unicode(e))
|
||||
|
||||
def _get_loop_items(self):
|
||||
'''
|
||||
@@ -153,16 +154,19 @@ class TaskExecutor:
|
||||
and returns the items result.
|
||||
'''
|
||||
|
||||
# create a copy of the job vars here so that we can modify
|
||||
# them temporarily without changing them too early for other
|
||||
# parts of the code that might still need a pristine version
|
||||
#vars_copy = self._job_vars.copy()
|
||||
vars_copy = self._job_vars
|
||||
# save the play context variables to a temporary dictionary,
|
||||
# so that we can modify the job vars without doing a full copy
|
||||
# and later restore them to avoid modifying things too early
|
||||
play_context_vars = dict()
|
||||
self._play_context.update_vars(play_context_vars)
|
||||
|
||||
# now we update them with the play context vars
|
||||
self._play_context.update_vars(vars_copy)
|
||||
old_vars = dict()
|
||||
for k in play_context_vars.keys():
|
||||
if k in self._job_vars:
|
||||
old_vars[k] = self._job_vars[k]
|
||||
self._job_vars[k] = play_context_vars[k]
|
||||
|
||||
templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=vars_copy)
|
||||
templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=self._job_vars)
|
||||
items = None
|
||||
if self._task.loop:
|
||||
if self._task.loop in self._shared_loader_obj.lookup_loader:
|
||||
@@ -179,16 +183,25 @@ class TaskExecutor:
|
||||
loop_terms = listify_lookup_plugin_terms(terms=self._task.loop_args, templar=templar,
|
||||
loader=self._loader, fail_on_undefined=True, convert_bare=True)
|
||||
except AnsibleUndefinedVariable as e:
|
||||
if 'has no attribute' in str(e):
|
||||
if u'has no attribute' in to_unicode(e):
|
||||
loop_terms = []
|
||||
display.deprecated("Skipping task due to undefined attribute, in the future this will be a fatal error.")
|
||||
else:
|
||||
raise
|
||||
items = self._shared_loader_obj.lookup_loader.get(self._task.loop, loader=self._loader,
|
||||
templar=templar).run(terms=loop_terms, variables=vars_copy)
|
||||
templar=templar).run(terms=loop_terms, variables=self._job_vars)
|
||||
else:
|
||||
raise AnsibleError("Unexpected failure in finding the lookup named '%s' in the available lookup plugins" % self._task.loop)
|
||||
|
||||
# now we restore any old job variables that may have been modified,
|
||||
# and delete them if they were in the play context vars but not in
|
||||
# the old variables dictionary
|
||||
for k in play_context_vars.keys():
|
||||
if k in old_vars:
|
||||
self._job_vars[k] = old_vars[k]
|
||||
else:
|
||||
del self._job_vars[k]
|
||||
|
||||
if items:
|
||||
from ansible.vars.unsafe_proxy import UnsafeProxy
|
||||
for idx, item in enumerate(items):
|
||||
@@ -218,7 +231,7 @@ class TaskExecutor:
|
||||
tmp_task = self._task.copy()
|
||||
tmp_play_context = self._play_context.copy()
|
||||
except AnsibleParserError as e:
|
||||
results.append(dict(failed=True, msg=str(e)))
|
||||
results.append(dict(failed=True, msg=to_unicode(e)))
|
||||
continue
|
||||
|
||||
# now we swap the internal task and play context with their copies,
|
||||
@@ -232,6 +245,7 @@ class TaskExecutor:
|
||||
# now update the result with the item info, and append the result
|
||||
# to the list of results
|
||||
res['item'] = item
|
||||
#TODO: send item results to callback here, instead of all at the end
|
||||
results.append(res)
|
||||
|
||||
return results
|
||||
@@ -302,6 +316,11 @@ class TaskExecutor:
|
||||
# do the same kind of post validation step on it here before we use it.
|
||||
self._play_context.post_validate(templar=templar)
|
||||
|
||||
# now that the play context is finalized, if the remote_addr is not set
|
||||
# default to using the host's address field as the remote address
|
||||
if not self._play_context.remote_addr:
|
||||
self._play_context.remote_addr = self._host.address
|
||||
|
||||
# We also add "magic" variables back into the variables dict to make sure
|
||||
# a certain subset of variables exist.
|
||||
self._play_context.update_vars(variables)
|
||||
@@ -348,8 +367,13 @@ class TaskExecutor:
|
||||
self._task.args = variable_params
|
||||
|
||||
# get the connection and the handler for this execution
|
||||
self._connection = self._get_connection(variables=variables, templar=templar)
|
||||
self._connection.set_host_overrides(host=self._host)
|
||||
if not self._connection or not getattr(self._connection, 'connected', False) or self._play_context.remote_addr != self._connection._play_context.remote_addr:
|
||||
self._connection = self._get_connection(variables=variables, templar=templar)
|
||||
self._connection.set_host_overrides(host=self._host)
|
||||
else:
|
||||
# if connection is reused, its _play_context is no longer valid and needs
|
||||
# to be replaced with the one templated above, in case other data changed
|
||||
self._connection._play_context = self._play_context
|
||||
|
||||
self._handler = self._get_action_handler(connection=self._connection, templar=templar)
|
||||
|
||||
@@ -372,30 +396,36 @@ class TaskExecutor:
|
||||
|
||||
# make a copy of the job vars here, in case we need to update them
|
||||
# with the registered variable value later on when testing conditions
|
||||
#vars_copy = variables.copy()
|
||||
vars_copy = variables.copy()
|
||||
|
||||
display.debug("starting attempt loop")
|
||||
result = None
|
||||
for attempt in range(retries):
|
||||
if attempt > 0:
|
||||
display.display("FAILED - RETRYING: %s (%d retries left). Result was: %s" % (self._task, retries-attempt, result), color="dark gray")
|
||||
display.display("FAILED - RETRYING: %s (%d retries left). Result was: %s" % (self._task, retries-attempt, result), color=C.COLOR_DEBUG)
|
||||
result['attempts'] = attempt + 1
|
||||
|
||||
display.debug("running the handler")
|
||||
try:
|
||||
result = self._handler.run(task_vars=variables)
|
||||
except AnsibleConnectionFailure as e:
|
||||
return dict(unreachable=True, msg=str(e))
|
||||
return dict(unreachable=True, msg=to_unicode(e))
|
||||
display.debug("handler run complete")
|
||||
|
||||
# update the local copy of vars with the registered value, if specified,
|
||||
# or any facts which may have been generated by the module execution
|
||||
if self._task.register:
|
||||
vars_copy[self._task.register] = wrap_var(result.copy())
|
||||
|
||||
if self._task.async > 0:
|
||||
# the async_wrapper module returns dumped JSON via its stdout
|
||||
# response, so we parse it here and replace the result
|
||||
try:
|
||||
if 'skipped' in result and result['skipped'] or 'failed' in result and result['failed']:
|
||||
return result
|
||||
result = json.loads(result.get('stdout'))
|
||||
except (TypeError, ValueError) as e:
|
||||
return dict(failed=True, msg="The async task did not return valid JSON: %s" % str(e))
|
||||
return dict(failed=True, msg=u"The async task did not return valid JSON: %s" % to_unicode(e))
|
||||
|
||||
if self._task.poll > 0:
|
||||
result = self._poll_async_result(result=result, templar=templar)
|
||||
@@ -416,11 +446,6 @@ class TaskExecutor:
|
||||
return failed_when_result
|
||||
return False
|
||||
|
||||
# update the local copy of vars with the registered value, if specified,
|
||||
# or any facts which may have been generated by the module execution
|
||||
if self._task.register:
|
||||
vars_copy[self._task.register] = result
|
||||
|
||||
if 'ansible_facts' in result:
|
||||
vars_copy.update(result['ansible_facts'])
|
||||
|
||||
@@ -437,7 +462,7 @@ class TaskExecutor:
|
||||
|
||||
if attempt < retries - 1:
|
||||
cond = Conditional(loader=self._loader)
|
||||
cond.when = self._task.until
|
||||
cond.when = [ self._task.until ]
|
||||
if cond.evaluate_conditional(templar, vars_copy):
|
||||
break
|
||||
|
||||
@@ -450,7 +475,7 @@ class TaskExecutor:
|
||||
# do the final update of the local variables here, for both registered
|
||||
# values and any facts which may have been created
|
||||
if self._task.register:
|
||||
variables[self._task.register] = result
|
||||
variables[self._task.register] = wrap_var(result)
|
||||
|
||||
if 'ansible_facts' in result:
|
||||
variables.update(result['ansible_facts'])
|
||||
@@ -528,9 +553,6 @@ class TaskExecutor:
|
||||
correct connection object from the list of connection plugins
|
||||
'''
|
||||
|
||||
if not self._play_context.remote_addr:
|
||||
self._play_context.remote_addr = self._host.address
|
||||
|
||||
if self._task.delegate_to is not None:
|
||||
# since we're delegating, we don't want to use interpreter values
|
||||
# which would have been set for the original target host
|
||||
|
||||
@@ -19,6 +19,7 @@
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
from multiprocessing.managers import SyncManager, DictProxy
|
||||
import multiprocessing
|
||||
import os
|
||||
import tempfile
|
||||
@@ -32,6 +33,8 @@ from ansible.executor.stats import AggregateStats
|
||||
from ansible.playbook.play_context import PlayContext
|
||||
from ansible.plugins import callback_loader, strategy_loader, module_loader
|
||||
from ansible.template import Templar
|
||||
from ansible.vars.hostvars import HostVars
|
||||
from ansible.plugins.callback import CallbackBase
|
||||
|
||||
try:
|
||||
from __main__ import display
|
||||
@@ -54,7 +57,7 @@ class TaskQueueManager:
|
||||
which dispatches the Play's tasks to hosts.
|
||||
'''
|
||||
|
||||
def __init__(self, inventory, variable_manager, loader, options, passwords, stdout_callback=None):
|
||||
def __init__(self, inventory, variable_manager, loader, options, passwords, stdout_callback=None, run_additional_callbacks=True, run_tree=False):
|
||||
|
||||
self._inventory = inventory
|
||||
self._variable_manager = variable_manager
|
||||
@@ -63,6 +66,8 @@ class TaskQueueManager:
|
||||
self._stats = AggregateStats()
|
||||
self.passwords = passwords
|
||||
self._stdout_callback = stdout_callback
|
||||
self._run_additional_callbacks = run_additional_callbacks
|
||||
self._run_tree = run_tree
|
||||
|
||||
self._callbacks_loaded = False
|
||||
self._callback_plugins = []
|
||||
@@ -94,14 +99,10 @@ class TaskQueueManager:
|
||||
def _initialize_processes(self, num):
|
||||
self._workers = []
|
||||
|
||||
for i in xrange(num):
|
||||
for i in range(num):
|
||||
main_q = multiprocessing.Queue()
|
||||
rslt_q = multiprocessing.Queue()
|
||||
|
||||
prc = WorkerProcess(self, main_q, rslt_q, self._loader)
|
||||
prc.start()
|
||||
|
||||
self._workers.append((prc, main_q, rslt_q))
|
||||
self._workers.append([None, main_q, rslt_q])
|
||||
|
||||
self._result_prc = ResultProcess(self._final_q, self._workers)
|
||||
self._result_prc.start()
|
||||
@@ -142,8 +143,16 @@ class TaskQueueManager:
|
||||
if self._stdout_callback is None:
|
||||
self._stdout_callback = C.DEFAULT_STDOUT_CALLBACK
|
||||
|
||||
if self._stdout_callback not in callback_loader:
|
||||
raise AnsibleError("Invalid callback for stdout specified: %s" % self._stdout_callback)
|
||||
if isinstance(self._stdout_callback, CallbackBase):
|
||||
stdout_callback_loaded = True
|
||||
elif isinstance(self._stdout_callback, basestring):
|
||||
if self._stdout_callback not in callback_loader:
|
||||
raise AnsibleError("Invalid callback for stdout specified: %s" % self._stdout_callback)
|
||||
else:
|
||||
self._stdout_callback = callback_loader.get(self._stdout_callback)
|
||||
stdout_callback_loaded = True
|
||||
else:
|
||||
raise AnsibleError("callback must be an instance of CallbackBase or the name of a callback plugin")
|
||||
|
||||
for callback_plugin in callback_loader.all(class_only=True):
|
||||
if hasattr(callback_plugin, 'CALLBACK_VERSION') and callback_plugin.CALLBACK_VERSION >= 2.0:
|
||||
@@ -157,7 +166,9 @@ class TaskQueueManager:
|
||||
if callback_name != self._stdout_callback or stdout_callback_loaded:
|
||||
continue
|
||||
stdout_callback_loaded = True
|
||||
elif callback_needs_whitelist and (C.DEFAULT_CALLBACK_WHITELIST is None or callback_name not in C.DEFAULT_CALLBACK_WHITELIST):
|
||||
elif callback_name == 'tree' and self._run_tree:
|
||||
pass
|
||||
elif not self._run_additional_callbacks or (callback_needs_whitelist and (C.DEFAULT_CALLBACK_WHITELIST is None or callback_name not in C.DEFAULT_CALLBACK_WHITELIST)):
|
||||
continue
|
||||
|
||||
self._callback_plugins.append(callback_plugin())
|
||||
@@ -173,11 +184,6 @@ class TaskQueueManager:
|
||||
are done with the current task).
|
||||
'''
|
||||
|
||||
# Fork # of forks, # of hosts or serial, whichever is lowest
|
||||
contenders = [self._options.forks, play.serial, len(self._inventory.get_hosts(play.hosts))]
|
||||
contenders = [ v for v in contenders if v is not None and v > 0 ]
|
||||
self._initialize_processes(min(contenders))
|
||||
|
||||
if not self._callbacks_loaded:
|
||||
self.load_callbacks()
|
||||
|
||||
@@ -187,6 +193,17 @@ class TaskQueueManager:
|
||||
new_play = play.copy()
|
||||
new_play.post_validate(templar)
|
||||
|
||||
self.hostvars = HostVars(
|
||||
inventory=self._inventory,
|
||||
variable_manager=self._variable_manager,
|
||||
loader=self._loader,
|
||||
)
|
||||
|
||||
# Fork # of forks, # of hosts or serial, whichever is lowest
|
||||
contenders = [self._options.forks, play.serial, len(self._inventory.get_hosts(new_play.hosts))]
|
||||
contenders = [ v for v in contenders if v is not None and v > 0 ]
|
||||
self._initialize_processes(min(contenders))
|
||||
|
||||
play_context = PlayContext(new_play, self._options, self.passwords, self._connection_lockfile.fileno())
|
||||
for callback_plugin in self._callback_plugins:
|
||||
if hasattr(callback_plugin, 'set_play_context'):
|
||||
@@ -236,7 +253,8 @@ class TaskQueueManager:
|
||||
for (worker_prc, main_q, rslt_q) in self._workers:
|
||||
rslt_q.close()
|
||||
main_q.close()
|
||||
worker_prc.terminate()
|
||||
if worker_prc and worker_prc.is_alive():
|
||||
worker_prc.terminate()
|
||||
|
||||
def clear_failed_hosts(self):
|
||||
self._failed_hosts = dict()
|
||||
@@ -260,7 +278,7 @@ class TaskQueueManager:
|
||||
self._terminated = True
|
||||
|
||||
def send_callback(self, method_name, *args, **kwargs):
|
||||
for callback_plugin in self._callback_plugins:
|
||||
for callback_plugin in [self._stdout_callback] + self._callback_plugins:
|
||||
# a plugin that set self.disabled to True will not be called
|
||||
# see osx_say.py example for such a plugin
|
||||
if getattr(callback_plugin, 'disabled', False):
|
||||
@@ -272,10 +290,28 @@ class TaskQueueManager:
|
||||
for method in methods:
|
||||
if method is not None:
|
||||
try:
|
||||
method(*args, **kwargs)
|
||||
# temporary hack, required due to a change in the callback API, so
|
||||
# we don't break backwards compatibility with callbacks which were
|
||||
# designed to use the original API
|
||||
# FIXME: target for removal and revert to the original code here
|
||||
# after a year (2017-01-14)
|
||||
if method_name == 'v2_playbook_on_start':
|
||||
import inspect
|
||||
(f_args, f_varargs, f_keywords, f_defaults) = inspect.getargspec(method)
|
||||
if 'playbook' in f_args:
|
||||
method(*args, **kwargs)
|
||||
else:
|
||||
method()
|
||||
else:
|
||||
method(*args, **kwargs)
|
||||
except Exception as e:
|
||||
import traceback
|
||||
orig_tb = traceback.format_exc()
|
||||
try:
|
||||
v1_method = method.replace('v2_','')
|
||||
v1_method(*args, **kwargs)
|
||||
except Exception:
|
||||
display.warning('Error when using %s: %s' % (method, str(e)))
|
||||
if display.verbosity >= 3:
|
||||
display.warning(orig_tb, formatted=True)
|
||||
else:
|
||||
display.warning('Error when using %s: %s' % (method, str(e)))
|
||||
|
||||
@@ -49,9 +49,34 @@ class Galaxy(object):
|
||||
this_dir, this_filename = os.path.split(__file__)
|
||||
self.DATA_PATH = os.path.join(this_dir, "data")
|
||||
|
||||
#TODO: move to getter for lazy loading
|
||||
self.default_readme = self._str_from_data_file('readme')
|
||||
self.default_meta = self._str_from_data_file('metadata_template.j2')
|
||||
self._default_readme = None
|
||||
self._default_meta = None
|
||||
self._default_test = None
|
||||
self._default_travis = None
|
||||
|
||||
@property
|
||||
def default_readme(self):
|
||||
if self._default_readme is None:
|
||||
self._default_readme = self._str_from_data_file('readme')
|
||||
return self._default_readme
|
||||
|
||||
@property
|
||||
def default_meta(self):
|
||||
if self._default_meta is None:
|
||||
self._default_meta = self._str_from_data_file('metadata_template.j2')
|
||||
return self._default_meta
|
||||
|
||||
@property
|
||||
def default_test(self):
|
||||
if self._default_test is None:
|
||||
self._default_test = self._str_from_data_file('test_playbook.j2')
|
||||
return self._default_test
|
||||
|
||||
@property
|
||||
def default_travis(self):
|
||||
if self._default_travis is None:
|
||||
self._default_travis = self._str_from_data_file('travis.j2')
|
||||
return self._default_travis
|
||||
|
||||
def add_role(self, role):
|
||||
self.roles[role.name] = role
|
||||
|
||||
@@ -25,11 +25,15 @@ from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import json
|
||||
import urllib
|
||||
|
||||
from urllib2 import quote as urlquote, HTTPError
|
||||
from urlparse import urlparse
|
||||
|
||||
import ansible.constants as C
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.module_utils.urls import open_url
|
||||
from ansible.galaxy.token import GalaxyToken
|
||||
|
||||
try:
|
||||
from __main__ import display
|
||||
@@ -43,45 +47,111 @@ class GalaxyAPI(object):
|
||||
|
||||
SUPPORTED_VERSIONS = ['v1']
|
||||
|
||||
def __init__(self, galaxy, api_server):
|
||||
|
||||
def __init__(self, galaxy):
|
||||
self.galaxy = galaxy
|
||||
self.token = GalaxyToken()
|
||||
self._api_server = C.GALAXY_SERVER
|
||||
self._validate_certs = not C.GALAXY_IGNORE_CERTS
|
||||
|
||||
try:
|
||||
urlparse(api_server, scheme='https')
|
||||
except:
|
||||
raise AnsibleError("Invalid server API url passed: %s" % api_server)
|
||||
# set validate_certs
|
||||
if galaxy.options.ignore_certs:
|
||||
self._validate_certs = False
|
||||
display.vvv('Validate TLS certificates: %s' % self._validate_certs)
|
||||
|
||||
server_version = self.get_server_api_version('%s/api/' % (api_server))
|
||||
if not server_version:
|
||||
raise AnsibleError("Could not retrieve server API version: %s" % api_server)
|
||||
# set the API server
|
||||
if galaxy.options.api_server != C.GALAXY_SERVER:
|
||||
self._api_server = galaxy.options.api_server
|
||||
display.vvv("Connecting to galaxy_server: %s" % self._api_server)
|
||||
|
||||
if server_version in self.SUPPORTED_VERSIONS:
|
||||
self.baseurl = '%s/api/%s' % (api_server, server_version)
|
||||
self.version = server_version # for future use
|
||||
display.vvvvv("Base API: %s" % self.baseurl)
|
||||
else:
|
||||
server_version = self.get_server_api_version()
|
||||
if not server_version in self.SUPPORTED_VERSIONS:
|
||||
raise AnsibleError("Unsupported Galaxy server API version: %s" % server_version)
|
||||
|
||||
def get_server_api_version(self, api_server):
|
||||
self.baseurl = '%s/api/%s' % (self._api_server, server_version)
|
||||
self.version = server_version # for future use
|
||||
display.vvv("Base API: %s" % self.baseurl)
|
||||
|
||||
def __auth_header(self):
|
||||
token = self.token.get()
|
||||
if token is None:
|
||||
raise AnsibleError("No access token. You must first use login to authenticate and obtain an access token.")
|
||||
return {'Authorization': 'Token ' + token}
|
||||
|
||||
def __call_galaxy(self, url, args=None, headers=None, method=None):
|
||||
if args and not headers:
|
||||
headers = self.__auth_header()
|
||||
try:
|
||||
display.vvv(url)
|
||||
resp = open_url(url, data=args, validate_certs=self._validate_certs, headers=headers, method=method)
|
||||
data = json.load(resp)
|
||||
except HTTPError as e:
|
||||
res = json.load(e)
|
||||
raise AnsibleError(res['detail'])
|
||||
return data
|
||||
|
||||
@property
|
||||
def api_server(self):
|
||||
return self._api_server
|
||||
|
||||
@property
|
||||
def validate_certs(self):
|
||||
return self._validate_certs
|
||||
|
||||
def get_server_api_version(self):
|
||||
"""
|
||||
Fetches the Galaxy API current version to ensure
|
||||
the API server is up and reachable.
|
||||
"""
|
||||
#TODO: fix galaxy server which returns current_version path (/api/v1) vs actual version (v1)
|
||||
# also should set baseurl using supported_versions which has path
|
||||
return 'v1'
|
||||
|
||||
try:
|
||||
data = json.load(open_url(api_server, validate_certs=self.galaxy.options.validate_certs))
|
||||
return data.get("current_version", 'v1')
|
||||
except Exception:
|
||||
# TODO: report error
|
||||
return None
|
||||
url = '%s/api/' % self._api_server
|
||||
data = json.load(open_url(url, validate_certs=self._validate_certs))
|
||||
return data['current_version']
|
||||
except Exception as e:
|
||||
raise AnsibleError("The API server (%s) is not responding, please try again later." % url)
|
||||
|
||||
def authenticate(self, github_token):
|
||||
"""
|
||||
Retrieve an authentication token
|
||||
"""
|
||||
url = '%s/tokens/' % self.baseurl
|
||||
args = urllib.urlencode({"github_token": github_token})
|
||||
resp = open_url(url, data=args, validate_certs=self._validate_certs, method="POST")
|
||||
data = json.load(resp)
|
||||
return data
|
||||
|
||||
def create_import_task(self, github_user, github_repo, reference=None):
|
||||
"""
|
||||
Post an import request
|
||||
"""
|
||||
url = '%s/imports/' % self.baseurl
|
||||
args = urllib.urlencode({
|
||||
"github_user": github_user,
|
||||
"github_repo": github_repo,
|
||||
"github_reference": reference if reference else ""
|
||||
})
|
||||
data = self.__call_galaxy(url, args=args)
|
||||
if data.get('results', None):
|
||||
return data['results']
|
||||
return data
|
||||
|
||||
def get_import_task(self, task_id=None, github_user=None, github_repo=None):
|
||||
"""
|
||||
Check the status of an import task.
|
||||
"""
|
||||
url = '%s/imports/' % self.baseurl
|
||||
if not task_id is None:
|
||||
url = "%s?id=%d" % (url,task_id)
|
||||
elif not github_user is None and not github_repo is None:
|
||||
url = "%s?github_user=%s&github_repo=%s" % (url,github_user,github_repo)
|
||||
else:
|
||||
raise AnsibleError("Expected task_id or github_user and github_repo")
|
||||
|
||||
data = self.__call_galaxy(url)
|
||||
return data['results']
|
||||
|
||||
def lookup_role_by_name(self, role_name, notify=True):
|
||||
"""
|
||||
Find a role by name
|
||||
Find a role by name.
|
||||
"""
|
||||
role_name = urlquote(role_name)
|
||||
|
||||
@@ -92,18 +162,12 @@ class GalaxyAPI(object):
|
||||
if notify:
|
||||
display.display("- downloading role '%s', owned by %s" % (role_name, user_name))
|
||||
except:
|
||||
raise AnsibleError("- invalid role name (%s). Specify role as format: username.rolename" % role_name)
|
||||
raise AnsibleError("Invalid role name (%s). Specify role as format: username.rolename" % role_name)
|
||||
|
||||
url = '%s/roles/?owner__username=%s&name=%s' % (self.baseurl, user_name, role_name)
|
||||
display.vvvv("- %s" % (url))
|
||||
try:
|
||||
data = json.load(open_url(url, validate_certs=self.galaxy.options.validate_certs))
|
||||
if len(data["results"]) != 0:
|
||||
return data["results"][0]
|
||||
except:
|
||||
# TODO: report on connection/availability errors
|
||||
pass
|
||||
|
||||
data = self.__call_galaxy(url)
|
||||
if len(data["results"]) != 0:
|
||||
return data["results"][0]
|
||||
return None
|
||||
|
||||
def fetch_role_related(self, related, role_id):
|
||||
@@ -114,13 +178,12 @@ class GalaxyAPI(object):
|
||||
|
||||
try:
|
||||
url = '%s/roles/%d/%s/?page_size=50' % (self.baseurl, int(role_id), related)
|
||||
data = json.load(open_url(url, validate_certs=self.galaxy.options.validate_certs))
|
||||
data = self.__call_galaxy(url)
|
||||
results = data['results']
|
||||
done = (data.get('next', None) is None)
|
||||
while not done:
|
||||
url = '%s%s' % (self.baseurl, data['next'])
|
||||
display.display(url)
|
||||
data = json.load(open_url(url, validate_certs=self.galaxy.options.validate_certs))
|
||||
data = self.__call_galaxy(url)
|
||||
results += data['results']
|
||||
done = (data.get('next', None) is None)
|
||||
return results
|
||||
@@ -131,10 +194,9 @@ class GalaxyAPI(object):
|
||||
"""
|
||||
Fetch the list of items specified.
|
||||
"""
|
||||
|
||||
try:
|
||||
url = '%s/%s/?page_size' % (self.baseurl, what)
|
||||
data = json.load(open_url(url, validate_certs=self.galaxy.options.validate_certs))
|
||||
data = self.__call_galaxy(url)
|
||||
if "results" in data:
|
||||
results = data['results']
|
||||
else:
|
||||
@@ -144,41 +206,64 @@ class GalaxyAPI(object):
|
||||
done = (data.get('next', None) is None)
|
||||
while not done:
|
||||
url = '%s%s' % (self.baseurl, data['next'])
|
||||
display.display(url)
|
||||
data = json.load(open_url(url, validate_certs=self.galaxy.options.validate_certs))
|
||||
data = self.__call_galaxy(url)
|
||||
results += data['results']
|
||||
done = (data.get('next', None) is None)
|
||||
return results
|
||||
except Exception as error:
|
||||
raise AnsibleError("Failed to download the %s list: %s" % (what, str(error)))
|
||||
|
||||
def search_roles(self, search, platforms=None, tags=None):
|
||||
def search_roles(self, search, **kwargs):
|
||||
|
||||
search_url = self.baseurl + '/roles/?page=1'
|
||||
search_url = self.baseurl + '/search/roles/?'
|
||||
|
||||
if search:
|
||||
search_url += '&search=' + urlquote(search)
|
||||
search_url += '&autocomplete=' + urlquote(search)
|
||||
|
||||
if tags is None:
|
||||
tags = []
|
||||
elif isinstance(tags, basestring):
|
||||
tags = kwargs.get('tags',None)
|
||||
platforms = kwargs.get('platforms', None)
|
||||
page_size = kwargs.get('page_size', None)
|
||||
author = kwargs.get('author', None)
|
||||
|
||||
if tags and isinstance(tags, basestring):
|
||||
tags = tags.split(',')
|
||||
|
||||
for tag in tags:
|
||||
search_url += '&chain__tags__name=' + urlquote(tag)
|
||||
|
||||
if platforms is None:
|
||||
platforms = []
|
||||
elif isinstance(platforms, basestring):
|
||||
search_url += '&tags_autocomplete=' + '+'.join(tags)
|
||||
|
||||
if platforms and isinstance(platforms, basestring):
|
||||
platforms = platforms.split(',')
|
||||
search_url += '&platforms_autocomplete=' + '+'.join(platforms)
|
||||
|
||||
for plat in platforms:
|
||||
search_url += '&chain__platforms__name=' + urlquote(plat)
|
||||
|
||||
display.debug("Executing query: %s" % search_url)
|
||||
try:
|
||||
data = json.load(open_url(search_url, validate_certs=self.galaxy.options.validate_certs))
|
||||
except HTTPError as e:
|
||||
raise AnsibleError("Unsuccessful request to server: %s" % str(e))
|
||||
if page_size:
|
||||
search_url += '&page_size=%s' % page_size
|
||||
|
||||
if author:
|
||||
search_url += '&username_autocomplete=%s' % author
|
||||
|
||||
data = self.__call_galaxy(search_url)
|
||||
return data
|
||||
|
||||
def add_secret(self, source, github_user, github_repo, secret):
|
||||
url = "%s/notification_secrets/" % self.baseurl
|
||||
args = urllib.urlencode({
|
||||
"source": source,
|
||||
"github_user": github_user,
|
||||
"github_repo": github_repo,
|
||||
"secret": secret
|
||||
})
|
||||
data = self.__call_galaxy(url, args=args)
|
||||
return data
|
||||
|
||||
def list_secrets(self):
|
||||
url = "%s/notification_secrets" % self.baseurl
|
||||
data = self.__call_galaxy(url, headers=self.__auth_header())
|
||||
return data
|
||||
|
||||
def remove_secret(self, secret_id):
|
||||
url = "%s/notification_secrets/%s/" % (self.baseurl, secret_id)
|
||||
data = self.__call_galaxy(url, headers=self.__auth_header(), method='DELETE')
|
||||
return data
|
||||
|
||||
def delete_role(self, github_user, github_repo):
|
||||
url = "%s/removerole/?github_user=%s&github_repo=%s" % (self.baseurl,github_user,github_repo)
|
||||
data = self.__call_galaxy(url, headers=self.__auth_header(), method='DELETE')
|
||||
return data
|
||||
|
||||
@@ -2,9 +2,11 @@ galaxy_info:
|
||||
author: {{ author }}
|
||||
description: {{description}}
|
||||
company: {{ company }}
|
||||
|
||||
# If the issue tracker for your role is not on github, uncomment the
|
||||
# next line and provide a value
|
||||
# issue_tracker_url: {{ issue_tracker_url }}
|
||||
|
||||
# Some suggested licenses:
|
||||
# - BSD (default)
|
||||
# - MIT
|
||||
@@ -13,7 +15,17 @@ galaxy_info:
|
||||
# - Apache
|
||||
# - CC-BY
|
||||
license: {{ license }}
|
||||
|
||||
min_ansible_version: {{ min_ansible_version }}
|
||||
|
||||
# Optionally specify the branch Galaxy will use when accessing the GitHub
|
||||
# repo for this role. During role install, if no tags are available,
|
||||
# Galaxy will use this branch. During import Galaxy will access files on
|
||||
# this branch. If travis integration is cofigured, only notification for this
|
||||
# branch will be accepted. Otherwise, in all cases, the repo's default branch
|
||||
# (usually master) will be used.
|
||||
#github_branch:
|
||||
|
||||
#
|
||||
# Below are all platforms currently available. Just uncomment
|
||||
# the ones that apply to your role. If you don't see your
|
||||
@@ -28,6 +40,7 @@ galaxy_info:
|
||||
# - {{ version }}
|
||||
{%- endfor %}
|
||||
{%- endfor %}
|
||||
|
||||
galaxy_tags: []
|
||||
# List tags for your role here, one per line. A tag is
|
||||
# a keyword that describes and categorizes the role.
|
||||
@@ -36,6 +49,7 @@ galaxy_info:
|
||||
#
|
||||
# NOTE: A tag is limited to a single word comprised of
|
||||
# alphanumeric characters. Maximum 20 tags per role.
|
||||
|
||||
dependencies: []
|
||||
# List your role dependencies here, one per line.
|
||||
# Be sure to remove the '[]' above if you add dependencies
|
||||
|
||||
5
lib/ansible/galaxy/data/test_playbook.j2
Normal file
5
lib/ansible/galaxy/data/test_playbook.j2
Normal file
@@ -0,0 +1,5 @@
|
||||
---
|
||||
- hosts: localhost
|
||||
remote_user: root
|
||||
roles:
|
||||
- {{ role_name }}
|
||||
29
lib/ansible/galaxy/data/travis.j2
Normal file
29
lib/ansible/galaxy/data/travis.j2
Normal file
@@ -0,0 +1,29 @@
|
||||
---
|
||||
language: python
|
||||
python: "2.7"
|
||||
|
||||
# Use the new container infrastructure
|
||||
sudo: false
|
||||
|
||||
# Install ansible
|
||||
addons:
|
||||
apt:
|
||||
packages:
|
||||
- python-pip
|
||||
|
||||
install:
|
||||
# Install ansible
|
||||
- pip install ansible
|
||||
|
||||
# Check ansible version
|
||||
- ansible --version
|
||||
|
||||
# Create ansible.cfg with correct roles_path
|
||||
- printf '[defaults]\nroles_path=../' >ansible.cfg
|
||||
|
||||
script:
|
||||
# Basic role syntax check
|
||||
- ansible-playbook tests/test.yml -i tests/inventory --syntax-check
|
||||
|
||||
notifications:
|
||||
webhooks: https://galaxy.ansible.com/api/v1/notifications/
|
||||
113
lib/ansible/galaxy/login.py
Normal file
113
lib/ansible/galaxy/login.py
Normal file
@@ -0,0 +1,113 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
########################################################################
|
||||
#
|
||||
# (C) 2015, Chris Houseknecht <chouse@ansible.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
########################################################################
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import getpass
|
||||
import json
|
||||
import urllib
|
||||
|
||||
from urllib2 import quote as urlquote, HTTPError
|
||||
from urlparse import urlparse
|
||||
|
||||
from ansible.errors import AnsibleError, AnsibleOptionsError
|
||||
from ansible.module_utils.urls import open_url
|
||||
from ansible.utils.color import stringc
|
||||
|
||||
try:
|
||||
from __main__ import display
|
||||
except ImportError:
|
||||
from ansible.utils.display import Display
|
||||
display = Display()
|
||||
|
||||
class GalaxyLogin(object):
    ''' Class to handle authenticating user with Galaxy API prior to performing CUD operations '''

    # GitHub personal-access-token (authorizations) API endpoint.
    GITHUB_AUTH = 'https://api.github.com/authorizations'

    def __init__(self, galaxy, github_token=None):
        """
        :param galaxy: the Galaxy context object for this session.
        :param github_token: an existing GitHub token; when None the user
            is prompted interactively for their GitHub credentials.
        """
        self.galaxy = galaxy
        self.github_username = None
        self.github_password = None

        # Fix: compare against None with 'is', not '=='.
        if github_token is None:
            self.get_credentials()

    def get_credentials(self):
        """Prompt for the user's GitHub username and password.

        Raises AnsibleError when either value is missing (for example the
        user aborted a prompt).
        """
        display.display(u'\n\n' + "We need your " + stringc("Github login",'bright cyan') +
            " to identify you.", screen_only=True)
        display.display("This information will " + stringc("not be sent to Galaxy",'bright cyan') +
            ", only to " + stringc("api.github.com.","yellow"), screen_only=True)
        display.display("The password will not be displayed." + u'\n\n', screen_only=True)
        display.display("Use " + stringc("--github-token",'yellow') +
            " if you do not want to enter your password." + u'\n\n', screen_only=True)

        # Fix: the original used bare 'except:' which swallows every
        # BaseException; only an aborted prompt (EOF / Ctrl-C) is expected
        # here, and the missing-credential check below handles that case.
        try:
            self.github_username = raw_input("Github Username: ")
        except (EOFError, KeyboardInterrupt):
            pass

        try:
            self.github_password = getpass.getpass("Password for %s: " % self.github_username)
        except (EOFError, KeyboardInterrupt):
            pass

        if not self.github_username or not self.github_password:
            raise AnsibleError("Invalid Github credentials. Username and password are required.")

    def remove_github_token(self):
        '''
        If for some reason an ansible-galaxy token was left from a prior login, remove it. We cannot
        retrieve the token after creation, so we are forced to create a new one.
        '''
        try:
            tokens = json.load(open_url(self.GITHUB_AUTH, url_username=self.github_username,
                url_password=self.github_password, force_basic_auth=True,))
        except HTTPError as e:
            # GitHub returns a JSON body describing the failure.
            res = json.load(e)
            raise AnsibleError(res['message'])

        for token in tokens:
            # Only remove tokens this tool created; leave the user's
            # other authorizations untouched.
            if token['note'] == 'ansible-galaxy login':
                display.vvvvv('removing token: %s' % token['token_last_eight'])
                try:
                    open_url('https://api.github.com/authorizations/%d' % token['id'], url_username=self.github_username,
                        url_password=self.github_password, method='DELETE', force_basic_auth=True,)
                except HTTPError as e:
                    res = json.load(e)
                    raise AnsibleError(res['message'])

    def create_github_token(self):
        '''
        Create a personal authorization token with a note of 'ansible-galaxy login'
        '''
        # Tokens cannot be retrieved after creation, so drop any stale one first.
        self.remove_github_token()
        args = json.dumps({"scopes":["public_repo"], "note":"ansible-galaxy login"})
        try:
            data = json.load(open_url(self.GITHUB_AUTH, url_username=self.github_username,
                url_password=self.github_password, force_basic_auth=True, data=args))
        except HTTPError as e:
            res = json.load(e)
            raise AnsibleError(res['message'])
        return data['token']
||||
@@ -46,7 +46,7 @@ class GalaxyRole(object):
|
||||
SUPPORTED_SCMS = set(['git', 'hg'])
|
||||
META_MAIN = os.path.join('meta', 'main.yml')
|
||||
META_INSTALL = os.path.join('meta', '.galaxy_install_info')
|
||||
ROLE_DIRS = ('defaults','files','handlers','meta','tasks','templates','vars')
|
||||
ROLE_DIRS = ('defaults','files','handlers','meta','tasks','templates','vars','tests')
|
||||
|
||||
|
||||
def __init__(self, galaxy, name, src=None, version=None, scm=None, path=None):
|
||||
@@ -130,13 +130,11 @@ class GalaxyRole(object):
|
||||
install_date=datetime.datetime.utcnow().strftime("%c"),
|
||||
)
|
||||
info_path = os.path.join(self.path, self.META_INSTALL)
|
||||
try:
|
||||
f = open(info_path, 'w+')
|
||||
self._install_info = yaml.safe_dump(info, f)
|
||||
except:
|
||||
return False
|
||||
finally:
|
||||
f.close()
|
||||
with open(info_path, 'w+') as f:
|
||||
try:
|
||||
self._install_info = yaml.safe_dump(info, f)
|
||||
except:
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
@@ -198,10 +196,10 @@ class GalaxyRole(object):
|
||||
role_data = self.src
|
||||
tmp_file = self.fetch(role_data)
|
||||
else:
|
||||
api = GalaxyAPI(self.galaxy, self.options.api_server)
|
||||
api = GalaxyAPI(self.galaxy)
|
||||
role_data = api.lookup_role_by_name(self.src)
|
||||
if not role_data:
|
||||
raise AnsibleError("- sorry, %s was not found on %s." % (self.src, self.options.api_server))
|
||||
raise AnsibleError("- sorry, %s was not found on %s." % (self.src, api.api_server))
|
||||
|
||||
role_versions = api.fetch_role_related('versions', role_data['id'])
|
||||
if not self.version:
|
||||
@@ -213,8 +211,10 @@ class GalaxyRole(object):
|
||||
loose_versions = [LooseVersion(a.get('name',None)) for a in role_versions]
|
||||
loose_versions.sort()
|
||||
self.version = str(loose_versions[-1])
|
||||
elif role_data.get('github_branch', None):
|
||||
self.version = role_data['github_branch']
|
||||
else:
|
||||
self.version = 'master'
|
||||
self.version = 'master'
|
||||
elif self.version != 'master':
|
||||
if role_versions and self.version not in [a.get('name', None) for a in role_versions]:
|
||||
raise AnsibleError("- the specified version (%s) of %s was not found in the list of available versions (%s)." % (self.version, self.name, role_versions))
|
||||
|
||||
67
lib/ansible/galaxy/token.py
Normal file
67
lib/ansible/galaxy/token.py
Normal file
@@ -0,0 +1,67 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
########################################################################
|
||||
#
|
||||
# (C) 2015, Chris Houseknecht <chouse@ansible.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
########################################################################
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import os
|
||||
import yaml
|
||||
from stat import *
|
||||
|
||||
try:
|
||||
from __main__ import display
|
||||
except ImportError:
|
||||
from ansible.utils.display import Display
|
||||
display = Display()
|
||||
|
||||
|
||||
class GalaxyToken(object):
    ''' Class for storing and retrieving the Galaxy token in ~/.ansible_galaxy '''

    def __init__(self):
        # Token file lives directly in the user's home directory.
        self.file = os.path.expanduser("~") + '/.ansible_galaxy'
        # Fix: the original passed the open file handle straight into
        # yaml.safe_load() and never closed it; use a context manager.
        with self.__open_config_for_read() as f:
            self.config = yaml.safe_load(f)
        if not self.config:
            self.config = {}

    def __open_config_for_read(self):
        """Return a read handle to the token file, creating it (owner
        read/write only) when it does not yet exist. The caller is
        responsible for closing the returned handle."""
        if os.path.isfile(self.file):
            display.vvv('Opened %s' % self.file)
            return open(self.file, 'r')
        # token file not found: create it empty and chmod u+rw so only
        # the owner can read the stored token
        open(self.file, 'w').close()
        os.chmod(self.file, S_IRUSR | S_IWUSR)  # owner has +rw
        display.vvv('Created %s' % self.file)
        return open(self.file, 'r')

    def set(self, token):
        """Store *token* and persist it to disk immediately."""
        self.config['token'] = token
        self.save()

    def get(self):
        """Return the stored token, or None when no token is saved."""
        return self.config.get('token', None)

    def save(self):
        """Write the current config to the token file as YAML."""
        with open(self.file, 'w') as f:
            yaml.safe_dump(self.config, f, default_flow_style=False)
|
||||
|
||||
@@ -78,6 +78,10 @@ class Inventory(object):
|
||||
self._restriction = None
|
||||
self._subset = None
|
||||
|
||||
# clear the cache here, which is only useful if more than
|
||||
# one Inventory objects are created when using the API directly
|
||||
self.clear_pattern_cache()
|
||||
|
||||
self.parse_inventory(host_list)
|
||||
|
||||
def serialize(self):
|
||||
@@ -109,7 +113,12 @@ class Inventory(object):
|
||||
pass
|
||||
elif isinstance(host_list, list):
|
||||
for h in host_list:
|
||||
(host, port) = parse_address(h, allow_ranges=False)
|
||||
try:
|
||||
(host, port) = parse_address(h, allow_ranges=False)
|
||||
except AnsibleError as e:
|
||||
display.vvv("Unable to parse address from hostname, leaving unchanged: %s" % to_unicode(e))
|
||||
host = h
|
||||
port = None
|
||||
all.add_host(Host(host, port))
|
||||
elif self._loader.path_exists(host_list):
|
||||
#TODO: switch this to a plugin loader and a 'condition' per plugin on which it should be tried, restoring 'inventory plugins'
|
||||
@@ -178,25 +187,26 @@ class Inventory(object):
|
||||
if self._restriction:
|
||||
pattern_hash += u":%s" % to_unicode(self._restriction)
|
||||
|
||||
if pattern_hash in HOSTS_PATTERNS_CACHE:
|
||||
return HOSTS_PATTERNS_CACHE[pattern_hash][:]
|
||||
if pattern_hash not in HOSTS_PATTERNS_CACHE:
|
||||
|
||||
patterns = Inventory.split_host_pattern(pattern)
|
||||
hosts = self._evaluate_patterns(patterns)
|
||||
patterns = Inventory.split_host_pattern(pattern)
|
||||
hosts = self._evaluate_patterns(patterns)
|
||||
|
||||
# mainly useful for hostvars[host] access
|
||||
if not ignore_limits_and_restrictions:
|
||||
# exclude hosts not in a subset, if defined
|
||||
if self._subset:
|
||||
subset = self._evaluate_patterns(self._subset)
|
||||
hosts = [ h for h in hosts if h in subset ]
|
||||
# mainly useful for hostvars[host] access
|
||||
if not ignore_limits_and_restrictions:
|
||||
# exclude hosts not in a subset, if defined
|
||||
if self._subset:
|
||||
subset = self._evaluate_patterns(self._subset)
|
||||
hosts = [ h for h in hosts if h in subset ]
|
||||
|
||||
# exclude hosts mentioned in any restriction (ex: failed hosts)
|
||||
if self._restriction is not None:
|
||||
hosts = [ h for h in hosts if h in self._restriction ]
|
||||
# exclude hosts mentioned in any restriction (ex: failed hosts)
|
||||
if self._restriction is not None:
|
||||
hosts = [ h for h in hosts if h in self._restriction ]
|
||||
|
||||
HOSTS_PATTERNS_CACHE[pattern_hash] = hosts[:]
|
||||
return hosts
|
||||
seen = set()
|
||||
HOSTS_PATTERNS_CACHE[pattern_hash] = [x for x in hosts if x not in seen and not seen.add(x)]
|
||||
|
||||
return HOSTS_PATTERNS_CACHE[pattern_hash][:]
|
||||
|
||||
@classmethod
|
||||
def split_host_pattern(cls, pattern):
|
||||
@@ -227,15 +237,13 @@ class Inventory(object):
|
||||
# If it doesn't, it could still be a single pattern. This accounts for
|
||||
# non-separator uses of colons: IPv6 addresses and [x:y] host ranges.
|
||||
else:
|
||||
(base, port) = parse_address(pattern, allow_ranges=True)
|
||||
if base:
|
||||
try:
|
||||
(base, port) = parse_address(pattern, allow_ranges=True)
|
||||
patterns = [pattern]
|
||||
|
||||
# The only other case we accept is a ':'-separated list of patterns.
|
||||
# This mishandles IPv6 addresses, and is retained only for backwards
|
||||
# compatibility.
|
||||
|
||||
else:
|
||||
except:
|
||||
# The only other case we accept is a ':'-separated list of patterns.
|
||||
# This mishandles IPv6 addresses, and is retained only for backwards
|
||||
# compatibility.
|
||||
patterns = re.findall(
|
||||
r'''(?: # We want to match something comprising:
|
||||
[^\s:\[\]] # (anything other than whitespace or ':[]'
|
||||
@@ -388,7 +396,7 @@ class Inventory(object):
|
||||
end = -1
|
||||
subscript = (int(start), int(end))
|
||||
if sep == '-':
|
||||
display.deprecated("Use [x:y] inclusive subscripts instead of [x-y]", version=2.0, removed=True)
|
||||
display.warning("Use [x:y] inclusive subscripts instead of [x-y] which has been removed")
|
||||
|
||||
return (pattern, subscript)
|
||||
|
||||
@@ -455,6 +463,8 @@ class Inventory(object):
|
||||
|
||||
def clear_pattern_cache(self):
|
||||
''' called exclusively by the add_host plugin to allow patterns to be recalculated '''
|
||||
global HOSTS_PATTERNS_CACHE
|
||||
HOSTS_PATTERNS_CACHE = {}
|
||||
self._pattern_cache = {}
|
||||
|
||||
def groups_for_host(self, host):
|
||||
@@ -729,12 +739,12 @@ class Inventory(object):
|
||||
|
||||
if group and host is None:
|
||||
# load vars in dir/group_vars/name_of_group
|
||||
base_path = os.path.realpath(os.path.join(basedir, "group_vars/%s" % group.name))
|
||||
results = self._variable_manager.add_group_vars_file(base_path, self._loader)
|
||||
base_path = os.path.realpath(os.path.join(to_unicode(basedir, errors='strict'), "group_vars/%s" % group.name))
|
||||
results = combine_vars(results, self._variable_manager.add_group_vars_file(base_path, self._loader))
|
||||
elif host and group is None:
|
||||
# same for hostvars in dir/host_vars/name_of_host
|
||||
base_path = os.path.realpath(os.path.join(basedir, "host_vars/%s" % host.name))
|
||||
results = self._variable_manager.add_host_vars_file(base_path, self._loader)
|
||||
base_path = os.path.realpath(os.path.join(to_unicode(basedir, errors='strict'), "host_vars/%s" % host.name))
|
||||
results = combine_vars(results, self._variable_manager.add_host_vars_file(base_path, self._loader))
|
||||
|
||||
# all done, results is a dictionary of variables for this particular host.
|
||||
return results
|
||||
|
||||
@@ -192,6 +192,8 @@ class InventoryDirectory(object):
|
||||
if group.name not in self.groups:
|
||||
# it's brand new, add him!
|
||||
self.groups[group.name] = group
|
||||
# the Group class does not (yet) implement __eq__/__ne__,
|
||||
# so unlike Host we do a regular comparison here
|
||||
if self.groups[group.name] != group:
|
||||
# different object, merge
|
||||
self._merge_groups(self.groups[group.name], group)
|
||||
@@ -200,6 +202,9 @@ class InventoryDirectory(object):
|
||||
if host.name not in self.hosts:
|
||||
# Papa's got a brand new host
|
||||
self.hosts[host.name] = host
|
||||
# because the __eq__/__ne__ methods in Host() compare the
|
||||
# name fields rather than references, we use id() here to
|
||||
# do the object comparison for merges
|
||||
if self.hosts[host.name] != host:
|
||||
# different object, merge
|
||||
self._merge_hosts(self.hosts[host.name], host)
|
||||
|
||||
@@ -19,6 +19,8 @@
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import uuid
|
||||
|
||||
from ansible.inventory.group import Group
|
||||
from ansible.utils.vars import combine_vars
|
||||
|
||||
@@ -38,7 +40,7 @@ class Host:
|
||||
def __eq__(self, other):
|
||||
if not isinstance(other, Host):
|
||||
return False
|
||||
return self.name == other.name
|
||||
return self._uuid == other._uuid
|
||||
|
||||
def __ne__(self, other):
|
||||
return not self.__eq__(other)
|
||||
@@ -55,6 +57,7 @@ class Host:
|
||||
name=self.name,
|
||||
vars=self.vars.copy(),
|
||||
address=self.address,
|
||||
uuid=self._uuid,
|
||||
gathered_facts=self._gathered_facts,
|
||||
groups=groups,
|
||||
)
|
||||
@@ -65,6 +68,7 @@ class Host:
|
||||
self.name = data.get('name')
|
||||
self.vars = data.get('vars', dict())
|
||||
self.address = data.get('address', '')
|
||||
self._uuid = data.get('uuid', uuid.uuid4())
|
||||
|
||||
groups = data.get('groups', [])
|
||||
for group_data in groups:
|
||||
@@ -84,6 +88,7 @@ class Host:
|
||||
self.set_variable('ansible_port', int(port))
|
||||
|
||||
self._gathered_facts = False
|
||||
self._uuid = uuid.uuid4()
|
||||
|
||||
def __repr__(self):
|
||||
return self.get_name()
|
||||
|
||||
@@ -124,6 +124,9 @@ class InventoryParser(object):
|
||||
del pending_declarations[groupname]
|
||||
|
||||
continue
|
||||
elif line.startswith('['):
|
||||
self._raise_error("Invalid section entry: '%s'. Please make sure that there are no spaces" % line + \
|
||||
"in the section entry, and that there are no other invalid characters")
|
||||
|
||||
# It's not a section, so the current state tells us what kind of
|
||||
# definition it must be. The individual parsers will raise an
|
||||
@@ -264,9 +267,12 @@ class InventoryParser(object):
|
||||
# Can the given hostpattern be parsed as a host with an optional port
|
||||
# specification?
|
||||
|
||||
(pattern, port) = parse_address(hostpattern, allow_ranges=True)
|
||||
if not pattern:
|
||||
self._raise_error("Can't parse '%s' as host[:port]" % hostpattern)
|
||||
try:
|
||||
(pattern, port) = parse_address(hostpattern, allow_ranges=True)
|
||||
except:
|
||||
# not a recognizable host pattern
|
||||
pattern = hostpattern
|
||||
port = None
|
||||
|
||||
# Once we have separated the pattern, we expand it into list of one or
|
||||
# more hostnames, depending on whether it contains any [x:y] ranges.
|
||||
|
||||
@@ -31,6 +31,7 @@ from ansible.errors import AnsibleError
|
||||
from ansible.inventory.host import Host
|
||||
from ansible.inventory.group import Group
|
||||
from ansible.module_utils.basic import json_dict_bytes_to_unicode
|
||||
from ansible.utils.unicode import to_str, to_unicode
|
||||
|
||||
|
||||
class InventoryScript:
|
||||
@@ -57,12 +58,17 @@ class InventoryScript:
|
||||
if sp.returncode != 0:
|
||||
raise AnsibleError("Inventory script (%s) had an execution error: %s " % (filename,stderr))
|
||||
|
||||
self.data = stdout
|
||||
# make sure script output is unicode so that json loader will output
|
||||
# unicode strings itself
|
||||
try:
|
||||
self.data = to_unicode(stdout, errors="strict")
|
||||
except Exception as e:
|
||||
raise AnsibleError("inventory data from {0} contained characters that cannot be interpreted as UTF-8: {1}".format(to_str(self.filename), to_str(e)))
|
||||
|
||||
# see comment about _meta below
|
||||
self.host_vars_from_top = None
|
||||
self._parse(stderr)
|
||||
|
||||
|
||||
def _parse(self, err):
|
||||
|
||||
all_hosts = {}
|
||||
@@ -72,13 +78,11 @@ class InventoryScript:
|
||||
self.raw = self._loader.load(self.data)
|
||||
except Exception as e:
|
||||
sys.stderr.write(err + "\n")
|
||||
raise AnsibleError("failed to parse executable inventory script results from {0}: {1}".format(self.filename, str(e)))
|
||||
raise AnsibleError("failed to parse executable inventory script results from {0}: {1}".format(to_str(self.filename), to_str(e)))
|
||||
|
||||
if not isinstance(self.raw, Mapping):
|
||||
sys.stderr.write(err + "\n")
|
||||
raise AnsibleError("failed to parse executable inventory script results from {0}: data needs to be formatted as a json dict".format(self.filename))
|
||||
|
||||
self.raw = json_dict_bytes_to_unicode(self.raw)
|
||||
raise AnsibleError("failed to parse executable inventory script results from {0}: data needs to be formatted as a json dict".format(to_str(self.filename)))
|
||||
|
||||
group = None
|
||||
for (group_name, data) in self.raw.items():
|
||||
@@ -103,7 +107,7 @@ class InventoryScript:
|
||||
if not isinstance(data, dict):
|
||||
data = {'hosts': data}
|
||||
# is not those subkeys, then simplified syntax, host with vars
|
||||
elif not any(k in data for k in ('hosts','vars')):
|
||||
elif not any(k in data for k in ('hosts','vars','children')):
|
||||
data = {'hosts': [group_name], 'vars': data}
|
||||
|
||||
if 'hosts' in data:
|
||||
@@ -112,7 +116,7 @@ class InventoryScript:
|
||||
"data for the host list:\n %s" % (group_name, data))
|
||||
|
||||
for hostname in data['hosts']:
|
||||
if not hostname in all_hosts:
|
||||
if hostname not in all_hosts:
|
||||
all_hosts[hostname] = Host(hostname)
|
||||
host = all_hosts[hostname]
|
||||
group.add_host(host)
|
||||
@@ -145,10 +149,12 @@ class InventoryScript:
|
||||
def get_host_variables(self, host):
|
||||
""" Runs <script> --host <hostname> to determine additional host variables """
|
||||
if self.host_vars_from_top is not None:
|
||||
got = self.host_vars_from_top.get(host.name, {})
|
||||
try:
|
||||
got = self.host_vars_from_top.get(host.name, {})
|
||||
except AttributeError as e:
|
||||
raise AnsibleError("Improperly formated host information for %s: %s" % (host.name,to_str(e)))
|
||||
return got
|
||||
|
||||
|
||||
cmd = [self.filename, "--host", host.name]
|
||||
try:
|
||||
sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
@@ -161,4 +167,3 @@ class InventoryScript:
|
||||
return json_dict_bytes_to_unicode(self._loader.load(out))
|
||||
except ValueError:
|
||||
raise AnsibleError("could not parse post variable response: %s, %s" % (cmd, out))
|
||||
|
||||
|
||||
@@ -34,8 +34,8 @@ ANSIBLE_VERSION = "<<ANSIBLE_VERSION>>"
|
||||
MODULE_ARGS = "<<INCLUDE_ANSIBLE_MODULE_ARGS>>"
|
||||
MODULE_COMPLEX_ARGS = "<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>"
|
||||
|
||||
BOOLEANS_TRUE = ['yes', 'on', '1', 'true', 1]
|
||||
BOOLEANS_FALSE = ['no', 'off', '0', 'false', 0]
|
||||
BOOLEANS_TRUE = ['yes', 'on', '1', 'true', 1, True]
|
||||
BOOLEANS_FALSE = ['no', 'off', '0', 'false', 0, False]
|
||||
BOOLEANS = BOOLEANS_TRUE + BOOLEANS_FALSE
|
||||
|
||||
SELINUX_SPECIAL_FS="<<SELINUX_SPECIAL_FILESYSTEMS>>"
|
||||
@@ -213,7 +213,7 @@ except ImportError:
|
||||
elif isinstance(node, ast.List):
|
||||
return list(map(_convert, node.nodes))
|
||||
elif isinstance(node, ast.Dict):
|
||||
return dict((_convert(k), _convert(v)) for k, v in node.items)
|
||||
return dict((_convert(k), _convert(v)) for k, v in node.items())
|
||||
elif isinstance(node, ast.Name):
|
||||
if node.name in _safe_names:
|
||||
return _safe_names[node.name]
|
||||
@@ -369,7 +369,12 @@ def return_values(obj):
|
||||
sensitive values pre-jsonification."""
|
||||
if isinstance(obj, basestring):
|
||||
if obj:
|
||||
yield obj
|
||||
if isinstance(obj, bytes):
|
||||
yield obj
|
||||
else:
|
||||
# Unicode objects should all convert to utf-8
|
||||
# (still must deal with surrogateescape on python3)
|
||||
yield obj.encode('utf-8')
|
||||
return
|
||||
elif isinstance(obj, Sequence):
|
||||
for element in obj:
|
||||
@@ -391,10 +396,22 @@ def remove_values(value, no_log_strings):
|
||||
""" Remove strings in no_log_strings from value. If value is a container
|
||||
type, then remove a lot more"""
|
||||
if isinstance(value, basestring):
|
||||
if value in no_log_strings:
|
||||
if isinstance(value, unicode):
|
||||
# This should work everywhere on python2. Need to check
|
||||
# surrogateescape on python3
|
||||
bytes_value = value.encode('utf-8')
|
||||
value_is_unicode = True
|
||||
else:
|
||||
bytes_value = value
|
||||
value_is_unicode = False
|
||||
if bytes_value in no_log_strings:
|
||||
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
|
||||
for omit_me in no_log_strings:
|
||||
value = value.replace(omit_me, '*' * 8)
|
||||
bytes_value = bytes_value.replace(omit_me, '*' * 8)
|
||||
if value_is_unicode:
|
||||
value = unicode(bytes_value, 'utf-8', errors='replace')
|
||||
else:
|
||||
value = bytes_value
|
||||
elif isinstance(value, Sequence):
|
||||
return [remove_values(elem, no_log_strings) for elem in value]
|
||||
elif isinstance(value, Mapping):
|
||||
@@ -497,8 +514,11 @@ class AnsibleModule(object):
|
||||
self.no_log = no_log
|
||||
self.cleanup_files = []
|
||||
self._debug = False
|
||||
self._diff = False
|
||||
self._verbosity = 0
|
||||
|
||||
self.aliases = {}
|
||||
self._legal_inputs = ['_ansible_check_mode', '_ansible_no_log', '_ansible_debug', '_ansible_diff', '_ansible_verbosity']
|
||||
|
||||
if add_file_common_args:
|
||||
for k, v in FILE_COMMON_ARGUMENTS.items():
|
||||
@@ -507,6 +527,15 @@ class AnsibleModule(object):
|
||||
|
||||
self.params = self._load_params()
|
||||
|
||||
# append to legal_inputs and then possibly check against them
|
||||
try:
|
||||
self.aliases = self._handle_aliases()
|
||||
except Exception:
|
||||
e = get_exception()
|
||||
# use exceptions here because it's not safe to call fail_json until no_log is processed
|
||||
print('{"failed": true, "msg": "Module alias error: %s"}' % str(e))
|
||||
sys.exit(1)
|
||||
|
||||
# Save parameter values that should never be logged
|
||||
self.no_log_values = set()
|
||||
# Use the argspec to determine which args are no_log
|
||||
@@ -517,15 +546,10 @@ class AnsibleModule(object):
|
||||
if no_log_object:
|
||||
self.no_log_values.update(return_values(no_log_object))
|
||||
|
||||
# check the locale as set by the current environment, and
|
||||
# reset to LANG=C if it's an invalid/unavailable locale
|
||||
# check the locale as set by the current environment, and reset to
|
||||
# a known valid (LANG=C) if it's an invalid/unavailable locale
|
||||
self._check_locale()
|
||||
|
||||
self._legal_inputs = ['_ansible_check_mode', '_ansible_no_log', '_ansible_debug']
|
||||
|
||||
# append to legal_inputs and then possibly check against them
|
||||
self.aliases = self._handle_aliases()
|
||||
|
||||
self._check_arguments(check_invalid_arguments)
|
||||
|
||||
# check exclusive early
|
||||
@@ -554,7 +578,7 @@ class AnsibleModule(object):
|
||||
|
||||
self._set_defaults(pre=False)
|
||||
|
||||
if not self.no_log:
|
||||
if not self.no_log and self._verbosity >= 3:
|
||||
self._log_invocation()
|
||||
|
||||
# finally, make sure we're in a sane working dir
|
||||
@@ -728,7 +752,7 @@ class AnsibleModule(object):
|
||||
context = self.selinux_default_context(path)
|
||||
return self.set_context_if_different(path, context, False)
|
||||
|
||||
def set_context_if_different(self, path, context, changed):
|
||||
def set_context_if_different(self, path, context, changed, diff=None):
|
||||
|
||||
if not HAVE_SELINUX or not self.selinux_enabled():
|
||||
return changed
|
||||
@@ -749,6 +773,14 @@ class AnsibleModule(object):
|
||||
new_context[i] = cur_context[i]
|
||||
|
||||
if cur_context != new_context:
|
||||
if diff is not None:
|
||||
if 'before' not in diff:
|
||||
diff['before'] = {}
|
||||
diff['before']['secontext'] = cur_context
|
||||
if 'after' not in diff:
|
||||
diff['after'] = {}
|
||||
diff['after']['secontext'] = new_context
|
||||
|
||||
try:
|
||||
if self.check_mode:
|
||||
return True
|
||||
@@ -762,7 +794,7 @@ class AnsibleModule(object):
|
||||
changed = True
|
||||
return changed
|
||||
|
||||
def set_owner_if_different(self, path, owner, changed):
|
||||
def set_owner_if_different(self, path, owner, changed, diff=None):
|
||||
path = os.path.expanduser(path)
|
||||
if owner is None:
|
||||
return changed
|
||||
@@ -775,6 +807,15 @@ class AnsibleModule(object):
|
||||
except KeyError:
|
||||
self.fail_json(path=path, msg='chown failed: failed to look up user %s' % owner)
|
||||
if orig_uid != uid:
|
||||
|
||||
if diff is not None:
|
||||
if 'before' not in diff:
|
||||
diff['before'] = {}
|
||||
diff['before']['owner'] = orig_uid
|
||||
if 'after' not in diff:
|
||||
diff['after'] = {}
|
||||
diff['after']['owner'] = uid
|
||||
|
||||
if self.check_mode:
|
||||
return True
|
||||
try:
|
||||
@@ -784,7 +825,7 @@ class AnsibleModule(object):
|
||||
changed = True
|
||||
return changed
|
||||
|
||||
def set_group_if_different(self, path, group, changed):
|
||||
def set_group_if_different(self, path, group, changed, diff=None):
|
||||
path = os.path.expanduser(path)
|
||||
if group is None:
|
||||
return changed
|
||||
@@ -797,6 +838,15 @@ class AnsibleModule(object):
|
||||
except KeyError:
|
||||
self.fail_json(path=path, msg='chgrp failed: failed to look up group %s' % group)
|
||||
if orig_gid != gid:
|
||||
|
||||
if diff is not None:
|
||||
if 'before' not in diff:
|
||||
diff['before'] = {}
|
||||
diff['before']['group'] = orig_gid
|
||||
if 'after' not in diff:
|
||||
diff['after'] = {}
|
||||
diff['after']['group'] = gid
|
||||
|
||||
if self.check_mode:
|
||||
return True
|
||||
try:
|
||||
@@ -806,7 +856,7 @@ class AnsibleModule(object):
|
||||
changed = True
|
||||
return changed
|
||||
|
||||
def set_mode_if_different(self, path, mode, changed):
|
||||
def set_mode_if_different(self, path, mode, changed, diff=None):
|
||||
path = os.path.expanduser(path)
|
||||
path_stat = os.lstat(path)
|
||||
|
||||
@@ -828,6 +878,15 @@ class AnsibleModule(object):
|
||||
prev_mode = stat.S_IMODE(path_stat.st_mode)
|
||||
|
||||
if prev_mode != mode:
|
||||
|
||||
if diff is not None:
|
||||
if 'before' not in diff:
|
||||
diff['before'] = {}
|
||||
diff['before']['mode'] = oct(prev_mode)
|
||||
if 'after' not in diff:
|
||||
diff['after'] = {}
|
||||
diff['after']['mode'] = oct(mode)
|
||||
|
||||
if self.check_mode:
|
||||
return True
|
||||
# FIXME: comparison against string above will cause this to be executed
|
||||
@@ -961,27 +1020,27 @@ class AnsibleModule(object):
|
||||
or_reduce = lambda mode, perm: mode | user_perms_to_modes[user][perm]
|
||||
return reduce(or_reduce, perms, 0)
|
||||
|
||||
def set_fs_attributes_if_different(self, file_args, changed):
|
||||
def set_fs_attributes_if_different(self, file_args, changed, diff=None):
|
||||
# set modes owners and context as needed
|
||||
changed = self.set_context_if_different(
|
||||
file_args['path'], file_args['secontext'], changed
|
||||
file_args['path'], file_args['secontext'], changed, diff
|
||||
)
|
||||
changed = self.set_owner_if_different(
|
||||
file_args['path'], file_args['owner'], changed
|
||||
file_args['path'], file_args['owner'], changed, diff
|
||||
)
|
||||
changed = self.set_group_if_different(
|
||||
file_args['path'], file_args['group'], changed
|
||||
file_args['path'], file_args['group'], changed, diff
|
||||
)
|
||||
changed = self.set_mode_if_different(
|
||||
file_args['path'], file_args['mode'], changed
|
||||
file_args['path'], file_args['mode'], changed, diff
|
||||
)
|
||||
return changed
|
||||
|
||||
def set_directory_attributes_if_different(self, file_args, changed):
|
||||
return self.set_fs_attributes_if_different(file_args, changed)
|
||||
def set_directory_attributes_if_different(self, file_args, changed, diff=None):
|
||||
return self.set_fs_attributes_if_different(file_args, changed, diff)
|
||||
|
||||
def set_file_attributes_if_different(self, file_args, changed):
|
||||
return self.set_fs_attributes_if_different(file_args, changed)
|
||||
def set_file_attributes_if_different(self, file_args, changed, diff=None):
|
||||
return self.set_fs_attributes_if_different(file_args, changed, diff)
|
||||
|
||||
def add_path_info(self, kwargs):
|
||||
'''
|
||||
@@ -1034,7 +1093,6 @@ class AnsibleModule(object):
|
||||
# as it would be returned by locale.getdefaultlocale()
|
||||
locale.setlocale(locale.LC_ALL, '')
|
||||
except locale.Error:
|
||||
e = get_exception()
|
||||
# fallback to the 'C' locale, which may cause unicode
|
||||
# issues but is preferable to simply failing because
|
||||
# of an unknown locale
|
||||
@@ -1047,6 +1105,7 @@ class AnsibleModule(object):
|
||||
self.fail_json(msg="An unknown error was encountered while attempting to validate the locale: %s" % e)
|
||||
|
||||
def _handle_aliases(self):
|
||||
# this uses exceptions as it happens before we can safely call fail_json
|
||||
aliases_results = {} #alias:canon
|
||||
for (k,v) in self.argument_spec.items():
|
||||
self._legal_inputs.append(k)
|
||||
@@ -1055,11 +1114,11 @@ class AnsibleModule(object):
|
||||
required = v.get('required', False)
|
||||
if default is not None and required:
|
||||
# not alias specific but this is a good place to check this
|
||||
self.fail_json(msg="internal error: required and default are mutually exclusive for %s" % k)
|
||||
raise Exception("internal error: required and default are mutually exclusive for %s" % k)
|
||||
if aliases is None:
|
||||
continue
|
||||
if type(aliases) != list:
|
||||
self.fail_json(msg='internal error: aliases must be a list')
|
||||
raise Exception('internal error: aliases must be a list')
|
||||
for alias in aliases:
|
||||
self._legal_inputs.append(alias)
|
||||
aliases_results[alias] = k
|
||||
@@ -1082,6 +1141,12 @@ class AnsibleModule(object):
|
||||
elif k == '_ansible_debug':
|
||||
self._debug = self.boolean(v)
|
||||
|
||||
elif k == '_ansible_diff':
|
||||
self._diff = self.boolean(v)
|
||||
|
||||
elif k == '_ansible_verbosity':
|
||||
self._verbosity = v
|
||||
|
||||
elif check_invalid_arguments and k not in self._legal_inputs:
|
||||
self.fail_json(msg="unsupported parameter for module: %s" % k)
|
||||
|
||||
@@ -1257,7 +1322,7 @@ class AnsibleModule(object):
|
||||
if isinstance(value, bool):
|
||||
return value
|
||||
|
||||
if isinstance(value, basestring):
|
||||
if isinstance(value, basestring) or isinstance(value, int):
|
||||
return self.boolean(value)
|
||||
|
||||
raise TypeError('%s cannot be converted to a bool' % type(value))
|
||||
@@ -1414,7 +1479,6 @@ class AnsibleModule(object):
|
||||
self.log(msg, log_args=log_args)
|
||||
|
||||
|
||||
|
||||
def _set_cwd(self):
|
||||
try:
|
||||
cwd = os.getcwd()
|
||||
@@ -1507,6 +1571,8 @@ class AnsibleModule(object):
|
||||
self.add_path_info(kwargs)
|
||||
if not 'changed' in kwargs:
|
||||
kwargs['changed'] = False
|
||||
if 'invocation' not in kwargs:
|
||||
kwargs['invocation'] = {'module_args': self.params}
|
||||
kwargs = remove_values(kwargs, self.no_log_values)
|
||||
self.do_cleanup_files()
|
||||
print(self.jsonify(kwargs))
|
||||
@@ -1517,6 +1583,8 @@ class AnsibleModule(object):
|
||||
self.add_path_info(kwargs)
|
||||
assert 'msg' in kwargs, "implementation error -- msg to explain the error is required"
|
||||
kwargs['failed'] = True
|
||||
if 'invocation' not in kwargs:
|
||||
kwargs['invocation'] = {'module_args': self.params}
|
||||
kwargs = remove_values(kwargs, self.no_log_values)
|
||||
self.do_cleanup_files()
|
||||
print(self.jsonify(kwargs))
|
||||
@@ -1687,25 +1755,29 @@ class AnsibleModule(object):
|
||||
# rename might not preserve context
|
||||
self.set_context_if_different(dest, context, False)
|
||||
|
||||
def run_command(self, args, check_rc=False, close_fds=True, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None, use_unsafe_shell=False, prompt_regex=None):
|
||||
def run_command(self, args, check_rc=False, close_fds=True, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None, use_unsafe_shell=False, prompt_regex=None, environ_update=None):
|
||||
'''
|
||||
Execute a command, returns rc, stdout, and stderr.
|
||||
args is the command to run
|
||||
If args is a list, the command will be run with shell=False.
|
||||
If args is a string and use_unsafe_shell=False it will split args to a list and run with shell=False
|
||||
If args is a string and use_unsafe_shell=True it run with shell=True.
|
||||
Other arguments:
|
||||
- check_rc (boolean) Whether to call fail_json in case of
|
||||
non zero RC. Default is False.
|
||||
- close_fds (boolean) See documentation for subprocess.Popen().
|
||||
Default is True.
|
||||
- executable (string) See documentation for subprocess.Popen().
|
||||
Default is None.
|
||||
- prompt_regex (string) A regex string (not a compiled regex) which
|
||||
can be used to detect prompts in the stdout
|
||||
which would otherwise cause the execution
|
||||
to hang (especially if no input data is
|
||||
specified)
|
||||
|
||||
:arg args: is the command to run
|
||||
* If args is a list, the command will be run with shell=False.
|
||||
* If args is a string and use_unsafe_shell=False it will split args to a list and run with shell=False
|
||||
* If args is a string and use_unsafe_shell=True it runs with shell=True.
|
||||
:kw check_rc: Whether to call fail_json in case of non zero RC.
|
||||
Default False
|
||||
:kw close_fds: See documentation for subprocess.Popen(). Default True
|
||||
:kw executable: See documentation for subprocess.Popen(). Default None
|
||||
:kw data: If given, information to write to the stdin of the command
|
||||
:kw binary_data: If False, append a newline to the data. Default False
|
||||
:kw path_prefix: If given, additional path to find the command in.
|
||||
This adds to the PATH environment vairable so helper commands in
|
||||
the same directory can also be found
|
||||
:kw cwd: iIf given, working directory to run the command inside
|
||||
:kw use_unsafe_shell: See `args` parameter. Default False
|
||||
:kw prompt_regex: Regex string (not a compiled regex) which can be
|
||||
used to detect prompts in the stdout which would otherwise cause
|
||||
the execution to hang (especially if no input data is specified)
|
||||
:kwarg environ_update: dictionary to *update* os.environ with
|
||||
'''
|
||||
|
||||
shell = False
|
||||
@@ -1736,10 +1808,15 @@ class AnsibleModule(object):
|
||||
msg = None
|
||||
st_in = None
|
||||
|
||||
# Set a temporary env path if a prefix is passed
|
||||
env=os.environ
|
||||
# Manipulate the environ we'll send to the new process
|
||||
old_env_vals = {}
|
||||
if environ_update:
|
||||
for key, val in environ_update.items():
|
||||
old_env_vals[key] = os.environ.get(key, None)
|
||||
os.environ[key] = val
|
||||
if path_prefix:
|
||||
env['PATH']="%s:%s" % (path_prefix, env['PATH'])
|
||||
old_env_vals['PATH'] = os.environ['PATH']
|
||||
os.environ['PATH'] = "%s:%s" % (path_prefix, os.environ['PATH'])
|
||||
|
||||
# create a printable version of the command for use
|
||||
# in reporting later, which strips out things like
|
||||
@@ -1781,11 +1858,10 @@ class AnsibleModule(object):
|
||||
close_fds=close_fds,
|
||||
stdin=st_in,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE
|
||||
stderr=subprocess.PIPE,
|
||||
env=os.environ,
|
||||
)
|
||||
|
||||
if path_prefix:
|
||||
kwargs['env'] = env
|
||||
if cwd and os.path.isdir(cwd):
|
||||
kwargs['cwd'] = cwd
|
||||
|
||||
@@ -1864,6 +1940,13 @@ class AnsibleModule(object):
|
||||
except:
|
||||
self.fail_json(rc=257, msg=traceback.format_exc(), cmd=clean_args)
|
||||
|
||||
# Restore env settings
|
||||
for key, val in old_env_vals.items():
|
||||
if val is None:
|
||||
del os.environ[key]
|
||||
else:
|
||||
os.environ[key] = val
|
||||
|
||||
if rc != 0 and check_rc:
|
||||
msg = heuristic_log_sanitize(stderr.rstrip(), self.no_log_values)
|
||||
self.fail_json(cmd=clean_args, rc=rc, stdout=stdout, stderr=stderr, msg=msg)
|
||||
|
||||
@@ -78,6 +78,10 @@ class AnsibleCloudStack(object):
|
||||
self.returns = {}
|
||||
# these values will be casted to int
|
||||
self.returns_to_int = {}
|
||||
# these keys will be compared case sensitive in self.has_changed()
|
||||
self.case_sensitive_keys = [
|
||||
'id',
|
||||
]
|
||||
|
||||
self.module = module
|
||||
self._connect()
|
||||
@@ -138,16 +142,14 @@ class AnsibleCloudStack(object):
|
||||
continue
|
||||
|
||||
if key in current_dict:
|
||||
|
||||
# API returns string for int in some cases, just to make sure
|
||||
if isinstance(value, int):
|
||||
current_dict[key] = int(current_dict[key])
|
||||
elif isinstance(value, str):
|
||||
current_dict[key] = str(current_dict[key])
|
||||
|
||||
# Only need to detect a singe change, not every item
|
||||
if value != current_dict[key]:
|
||||
if self.case_sensitive_keys and key in self.case_sensitive_keys:
|
||||
if str(value) != str(current_dict[key]):
|
||||
return True
|
||||
# Test for diff in case insensitive way
|
||||
elif str(value).lower() != str(current_dict[key]).lower():
|
||||
return True
|
||||
else:
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
@@ -218,7 +220,7 @@ class AnsibleCloudStack(object):
|
||||
vms = self.cs.listVirtualMachines(**args)
|
||||
if vms:
|
||||
for v in vms['virtualmachine']:
|
||||
if vm in [ v['name'], v['displayname'], v['id'] ]:
|
||||
if vm.lower() in [ v['name'].lower(), v['displayname'].lower(), v['id'] ]:
|
||||
self.vm = v
|
||||
return self._get_by_key(key, self.vm)
|
||||
self.module.fail_json(msg="Virtual machine '%s' not found" % vm)
|
||||
@@ -238,7 +240,7 @@ class AnsibleCloudStack(object):
|
||||
|
||||
if zones:
|
||||
for z in zones['zone']:
|
||||
if zone in [ z['name'], z['id'] ]:
|
||||
if zone.lower() in [ z['name'].lower(), z['id'] ]:
|
||||
self.zone = z
|
||||
return self._get_by_key(key, self.zone)
|
||||
self.module.fail_json(msg="zone '%s' not found" % zone)
|
||||
|
||||
@@ -1,155 +0,0 @@
|
||||
#
|
||||
# (c) 2015 Peter Sprygada, <psprygada@ansible.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
"""
|
||||
This module adds shared support for Arista EOS devices using eAPI over
|
||||
HTTP/S transport. It is built on module_utils/urls.py which is required
|
||||
for proper operation.
|
||||
|
||||
In order to use this module, include it as part of a custom
|
||||
module as shown below.
|
||||
|
||||
** Note: The order of the import statements does matter. **
|
||||
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.urls import *
|
||||
from ansible.module_utils.eapi import *
|
||||
|
||||
The eapi module provides the following common argument spec:
|
||||
|
||||
* host (str) - [Required] The IPv4 address or FQDN of the network device
|
||||
|
||||
* port (str) - Overrides the default port to use for the HTTP/S
|
||||
connection. The default values are 80 for HTTP and
|
||||
443 for HTTPS
|
||||
|
||||
* url_username (str) - [Required] The username to use to authenticate
|
||||
the HTTP/S connection. Aliases: username
|
||||
|
||||
* url_password (str) - [Required] The password to use to authenticate
|
||||
the HTTP/S connection. Aliases: password
|
||||
|
||||
* use_ssl (bool) - Specifies whether or not to use an encrypted (HTTPS)
|
||||
connection or not. The default value is False.
|
||||
|
||||
* enable_mode (bool) - Specifies whether or not to enter `enable` mode
|
||||
prior to executing the command list. The default value is True
|
||||
|
||||
* enable_password (str) - The password for entering `enable` mode
|
||||
on the switch if configured.
|
||||
|
||||
In order to communicate with Arista EOS devices, the eAPI feature
|
||||
must be enabled and configured on the device.
|
||||
|
||||
"""
|
||||
def eapi_argument_spec(spec=None):
|
||||
"""Creates an argument spec for working with eAPI
|
||||
"""
|
||||
arg_spec = url_argument_spec()
|
||||
arg_spec.update(dict(
|
||||
host=dict(required=True),
|
||||
port=dict(),
|
||||
url_username=dict(required=True, aliases=['username']),
|
||||
url_password=dict(required=True, aliases=['password']),
|
||||
use_ssl=dict(default=True, type='bool'),
|
||||
enable_mode=dict(default=True, type='bool'),
|
||||
enable_password=dict()
|
||||
))
|
||||
if spec:
|
||||
arg_spec.update(spec)
|
||||
return arg_spec
|
||||
|
||||
def eapi_url(module):
|
||||
"""Construct a valid Arist eAPI URL
|
||||
"""
|
||||
if module.params['use_ssl']:
|
||||
proto = 'https'
|
||||
else:
|
||||
proto = 'http'
|
||||
host = module.params['host']
|
||||
url = '{}://{}'.format(proto, host)
|
||||
if module.params['port']:
|
||||
url = '{}:{}'.format(url, module.params['port'])
|
||||
return '{}/command-api'.format(url)
|
||||
|
||||
def to_list(arg):
|
||||
"""Convert the argument to a list object
|
||||
"""
|
||||
if isinstance(arg, (list, tuple)):
|
||||
return list(arg)
|
||||
elif arg is not None:
|
||||
return [arg]
|
||||
else:
|
||||
return []
|
||||
|
||||
def eapi_body(commands, encoding, reqid=None):
|
||||
"""Create a valid eAPI JSON-RPC request message
|
||||
"""
|
||||
params = dict(version=1, cmds=to_list(commands), format=encoding)
|
||||
return dict(jsonrpc='2.0', id=reqid, method='runCmds', params=params)
|
||||
|
||||
def eapi_enable_mode(module):
|
||||
"""Build commands for entering `enable` mode on the switch
|
||||
"""
|
||||
if module.params['enable_mode']:
|
||||
passwd = module.params['enable_password']
|
||||
if passwd:
|
||||
return dict(cmd='enable', input=passwd)
|
||||
else:
|
||||
return 'enable'
|
||||
|
||||
def eapi_command(module, commands, encoding='json'):
|
||||
"""Send an ordered list of commands to the device over eAPI
|
||||
"""
|
||||
commands = to_list(commands)
|
||||
url = eapi_url(module)
|
||||
|
||||
enable = eapi_enable_mode(module)
|
||||
if enable:
|
||||
commands.insert(0, enable)
|
||||
|
||||
data = eapi_body(commands, encoding)
|
||||
data = module.jsonify(data)
|
||||
|
||||
headers = {'Content-Type': 'application/json-rpc'}
|
||||
|
||||
response, headers = fetch_url(module, url, data=data, headers=headers,
|
||||
method='POST')
|
||||
|
||||
if headers['status'] != 200:
|
||||
module.fail_json(**headers)
|
||||
|
||||
response = module.from_json(response.read())
|
||||
if 'error' in response:
|
||||
err = response['error']
|
||||
module.fail_json(msg='json-rpc error', **err)
|
||||
|
||||
if enable:
|
||||
response['result'].pop(0)
|
||||
|
||||
return response['result'], headers
|
||||
|
||||
def eapi_configure(module, commands):
|
||||
"""Send configuration commands to the device over eAPI
|
||||
"""
|
||||
commands.insert(0, 'configure')
|
||||
response, headers = eapi_command(module, commands)
|
||||
response.pop(0)
|
||||
return response, headers
|
||||
|
||||
|
||||
@@ -41,21 +41,30 @@ except:
|
||||
HAS_LOOSE_VERSION = False
|
||||
|
||||
|
||||
class AnsibleAWSError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
def boto3_conn(module, conn_type=None, resource=None, region=None, endpoint=None, **params):
|
||||
profile = params.pop('profile_name', None)
|
||||
params['aws_session_token'] = params.pop('security_token', None)
|
||||
params['verify'] = params.pop('validate_certs', None)
|
||||
|
||||
if conn_type not in ['both', 'resource', 'client']:
|
||||
module.fail_json(msg='There is an issue in the code of the module. You must specify either both, resource or client to the conn_type parameter in the boto3_conn function call')
|
||||
|
||||
if conn_type == 'resource':
|
||||
resource = boto3.session.Session().resource(resource, region_name=region, endpoint_url=endpoint, **params)
|
||||
resource = boto3.session.Session(profile_name=profile).resource(resource, region_name=region, endpoint_url=endpoint, **params)
|
||||
return resource
|
||||
elif conn_type == 'client':
|
||||
client = boto3.session.Session().client(resource, region_name=region, endpoint_url=endpoint, **params)
|
||||
client = boto3.session.Session(profile_name=profile).client(resource, region_name=region, endpoint_url=endpoint, **params)
|
||||
return client
|
||||
else:
|
||||
resource = boto3.session.Session().resource(resource, region_name=region, endpoint_url=endpoint, **params)
|
||||
client = boto3.session.Session().client(resource, region_name=region, endpoint_url=endpoint, **params)
|
||||
resource = boto3.session.Session(profile_name=profile).resource(resource, region_name=region, endpoint_url=endpoint, **params)
|
||||
client = boto3.session.Session(profile_name=profile).client(resource, region_name=region, endpoint_url=endpoint, **params)
|
||||
return client, resource
|
||||
|
||||
|
||||
def aws_common_argument_spec():
|
||||
return dict(
|
||||
ec2_url=dict(),
|
||||
@@ -158,13 +167,12 @@ def get_aws_connection_info(module, boto3=False):
|
||||
if profile_name:
|
||||
boto_params['profile_name'] = profile_name
|
||||
|
||||
|
||||
else:
|
||||
boto_params = dict(aws_access_key_id=access_key,
|
||||
aws_secret_access_key=secret_key,
|
||||
security_token=security_token)
|
||||
|
||||
# profile_name only works as a key in boto >= 2.24
|
||||
# profile_name only works as a key in boto >= 2.24
|
||||
# so only set profile_name if passed as an argument
|
||||
if profile_name:
|
||||
if not boto_supports_profile_name():
|
||||
@@ -174,6 +182,10 @@ def get_aws_connection_info(module, boto3=False):
|
||||
if validate_certs and HAS_LOOSE_VERSION and LooseVersion(boto.Version) >= LooseVersion("2.6.0"):
|
||||
boto_params['validate_certs'] = validate_certs
|
||||
|
||||
for param, value in boto_params.items():
|
||||
if isinstance(value, str):
|
||||
boto_params[param] = unicode(value, 'utf-8', 'strict')
|
||||
|
||||
return region, ec2_url, boto_params
|
||||
|
||||
|
||||
@@ -196,9 +208,9 @@ def connect_to_aws(aws_module, region, **params):
|
||||
conn = aws_module.connect_to_region(region, **params)
|
||||
if not conn:
|
||||
if region not in [aws_module_region.name for aws_module_region in aws_module.regions()]:
|
||||
raise StandardError("Region %s does not seem to be available for aws module %s. If the region definitely exists, you may need to upgrade boto or extend with endpoints_path" % (region, aws_module.__name__))
|
||||
raise AnsibleAWSError("Region %s does not seem to be available for aws module %s. If the region definitely exists, you may need to upgrade boto or extend with endpoints_path" % (region, aws_module.__name__))
|
||||
else:
|
||||
raise StandardError("Unknown problem connecting to region %s for aws module %s." % (region, aws_module.__name__))
|
||||
raise AnsibleAWSError("Unknown problem connecting to region %s for aws module %s." % (region, aws_module.__name__))
|
||||
if params.get('profile_name'):
|
||||
conn = boto_fix_security_token_in_profile(conn, params['profile_name'])
|
||||
return conn
|
||||
@@ -214,13 +226,13 @@ def ec2_connect(module):
|
||||
if region:
|
||||
try:
|
||||
ec2 = connect_to_aws(boto.ec2, region, **boto_params)
|
||||
except (boto.exception.NoAuthHandlerFound, StandardError), e:
|
||||
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
|
||||
module.fail_json(msg=str(e))
|
||||
# Otherwise, no region so we fallback to the old connection method
|
||||
elif ec2_url:
|
||||
try:
|
||||
ec2 = boto.connect_ec2_endpoint(ec2_url, **boto_params)
|
||||
except (boto.exception.NoAuthHandlerFound, StandardError), e:
|
||||
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
|
||||
module.fail_json(msg=str(e))
|
||||
else:
|
||||
module.fail_json(msg="Either region or ec2_url must be specified")
|
||||
|
||||
227
lib/ansible/module_utils/eos.py
Normal file
227
lib/ansible/module_utils/eos.py
Normal file
@@ -0,0 +1,227 @@
|
||||
#
|
||||
# (c) 2015 Peter Sprygada, <psprygada@ansible.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
NET_PASSWD_RE = re.compile(r"[\r\n]?password: $", re.I)
|
||||
|
||||
NET_COMMON_ARGS = dict(
|
||||
host=dict(required=True),
|
||||
port=dict(type='int'),
|
||||
username=dict(required=True),
|
||||
password=dict(no_log=True),
|
||||
authorize=dict(default=False, type='bool'),
|
||||
auth_pass=dict(no_log=True),
|
||||
transport=dict(choices=['cli', 'eapi']),
|
||||
use_ssl=dict(default=True, type='bool'),
|
||||
provider=dict()
|
||||
)
|
||||
|
||||
def to_list(val):
|
||||
if isinstance(val, (list, tuple)):
|
||||
return list(val)
|
||||
elif val is not None:
|
||||
return [val]
|
||||
else:
|
||||
return list()
|
||||
|
||||
class Eapi(object):
|
||||
|
||||
def __init__(self, module):
|
||||
self.module = module
|
||||
|
||||
# sets the module_utils/urls.py req parameters
|
||||
self.module.params['url_username'] = module.params['username']
|
||||
self.module.params['url_password'] = module.params['password']
|
||||
|
||||
self.url = None
|
||||
self.enable = None
|
||||
|
||||
def _get_body(self, commands, encoding, reqid=None):
|
||||
"""Create a valid eAPI JSON-RPC request message
|
||||
"""
|
||||
params = dict(version=1, cmds=commands, format=encoding)
|
||||
return dict(jsonrpc='2.0', id=reqid, method='runCmds', params=params)
|
||||
|
||||
def connect(self):
|
||||
host = self.module.params['host']
|
||||
port = self.module.params['port']
|
||||
|
||||
if self.module.params['use_ssl']:
|
||||
proto = 'https'
|
||||
if not port:
|
||||
port = 443
|
||||
else:
|
||||
proto = 'http'
|
||||
if not port:
|
||||
port = 80
|
||||
|
||||
self.url = '%s://%s:%s/command-api' % (proto, host, port)
|
||||
|
||||
def authorize(self):
|
||||
if self.module.params['auth_pass']:
|
||||
passwd = self.module.params['auth_pass']
|
||||
self.enable = dict(cmd='enable', input=passwd)
|
||||
else:
|
||||
self.enable = 'enable'
|
||||
|
||||
def send(self, commands, encoding='json'):
|
||||
"""Send commands to the device.
|
||||
"""
|
||||
clist = to_list(commands)
|
||||
|
||||
if self.enable is not None:
|
||||
clist.insert(0, self.enable)
|
||||
|
||||
data = self._get_body(clist, encoding)
|
||||
data = self.module.jsonify(data)
|
||||
|
||||
headers = {'Content-Type': 'application/json-rpc'}
|
||||
|
||||
response, headers = fetch_url(self.module, self.url, data=data,
|
||||
headers=headers, method='POST')
|
||||
|
||||
if headers['status'] != 200:
|
||||
self.module.fail_json(**headers)
|
||||
|
||||
response = self.module.from_json(response.read())
|
||||
if 'error' in response:
|
||||
err = response['error']
|
||||
self.module.fail_json(msg='json-rpc error', **err)
|
||||
|
||||
if self.enable:
|
||||
response['result'].pop(0)
|
||||
|
||||
return response['result']
|
||||
|
||||
class Cli(object):
|
||||
|
||||
def __init__(self, module):
|
||||
self.module = module
|
||||
self.shell = None
|
||||
|
||||
def connect(self, **kwargs):
|
||||
host = self.module.params['host']
|
||||
port = self.module.params['port'] or 22
|
||||
|
||||
username = self.module.params['username']
|
||||
password = self.module.params['password']
|
||||
|
||||
self.shell = Shell()
|
||||
self.shell.open(host, port=port, username=username, password=password)
|
||||
|
||||
def authorize(self):
|
||||
passwd = self.module.params['auth_pass']
|
||||
self.send(Command('enable', prompt=NET_PASSWD_RE, response=passwd))
|
||||
|
||||
def send(self, commands, encoding='text'):
|
||||
return self.shell.send(commands)
|
||||
|
||||
class NetworkModule(AnsibleModule):
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(NetworkModule, self).__init__(*args, **kwargs)
|
||||
self.connection = None
|
||||
self._config = None
|
||||
|
||||
@property
|
||||
def config(self):
|
||||
if not self._config:
|
||||
self._config = self.get_config()
|
||||
return self._config
|
||||
|
||||
def _load_params(self):
|
||||
params = super(NetworkModule, self)._load_params()
|
||||
provider = params.get('provider') or dict()
|
||||
for key, value in provider.items():
|
||||
if key in NET_COMMON_ARGS.keys():
|
||||
params[key] = value
|
||||
return params
|
||||
|
||||
def connect(self):
|
||||
if self.params['transport'] == 'eapi':
|
||||
self.connection = Eapi(self)
|
||||
else:
|
||||
self.connection = Cli(self)
|
||||
|
||||
try:
|
||||
self.connection.connect()
|
||||
self.execute('terminal length 0')
|
||||
|
||||
if self.params['authorize']:
|
||||
self.connection.authorize()
|
||||
|
||||
except Exception, exc:
|
||||
self.fail_json(msg=exc.message)
|
||||
|
||||
def configure(self, commands):
|
||||
commands = to_list(commands)
|
||||
commands.insert(0, 'configure terminal')
|
||||
responses = self.execute(commands)
|
||||
responses.pop(0)
|
||||
|
||||
return responses
|
||||
|
||||
def config_replace(self, commands):
|
||||
if self.params['transport'] == 'cli':
|
||||
self.fail_json(msg='config replace only supported over eapi')
|
||||
|
||||
cmd = 'configure replace terminal:'
|
||||
commands = '\n'.join(to_list(commands))
|
||||
command = dict(cmd=cmd, input=commands)
|
||||
self.execute(command)
|
||||
|
||||
def execute(self, commands, **kwargs):
|
||||
try:
|
||||
return self.connection.send(commands, **kwargs)
|
||||
except Exception, exc:
|
||||
self.fail_json(msg=exc.message, commands=commands)
|
||||
|
||||
def disconnect(self):
|
||||
self.connection.close()
|
||||
|
||||
def parse_config(self, cfg):
|
||||
return parse(cfg, indent=3)
|
||||
|
||||
def get_config(self):
|
||||
cmd = 'show running-config'
|
||||
if self.params.get('include_defaults'):
|
||||
cmd += ' all'
|
||||
if self.params['transport'] == 'cli':
|
||||
return self.execute(cmd)[0]
|
||||
else:
|
||||
resp = self.execute(cmd, encoding='text')
|
||||
return resp[0]
|
||||
|
||||
|
||||
def get_module(**kwargs):
|
||||
"""Return instance of NetworkModule
|
||||
"""
|
||||
argument_spec = NET_COMMON_ARGS.copy()
|
||||
if kwargs.get('argument_spec'):
|
||||
argument_spec.update(kwargs['argument_spec'])
|
||||
kwargs['argument_spec'] = argument_spec
|
||||
|
||||
module = NetworkModule(**kwargs)
|
||||
|
||||
# HAS_PARAMIKO is set by module_utils/shell.py
|
||||
if module.params['transport'] == 'cli' and not HAS_PARAMIKO:
|
||||
module.fail_json(msg='paramiko is required but does not appear to be installed')
|
||||
|
||||
module.connect()
|
||||
|
||||
return module
|
||||
|
||||
@@ -51,19 +51,35 @@ def f5_argument_spec():
|
||||
def f5_parse_arguments(module):
|
||||
if not bigsuds_found:
|
||||
module.fail_json(msg="the python bigsuds module is required")
|
||||
if not module.params['validate_certs']:
|
||||
disable_ssl_cert_validation()
|
||||
|
||||
if module.params['validate_certs']:
|
||||
import ssl
|
||||
if not hasattr(ssl, 'SSLContext'):
|
||||
module.fail_json(msg='bigsuds does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task')
|
||||
|
||||
return (module.params['server'],module.params['user'],module.params['password'],module.params['state'],module.params['partition'],module.params['validate_certs'])
|
||||
|
||||
def bigip_api(bigip, user, password):
|
||||
api = bigsuds.BIGIP(hostname=bigip, username=user, password=password)
|
||||
return api
|
||||
def bigip_api(bigip, user, password, validate_certs):
|
||||
try:
|
||||
# bigsuds >= 1.0.3
|
||||
api = bigsuds.BIGIP(hostname=bigip, username=user, password=password, verify=validate_certs)
|
||||
except TypeError:
|
||||
# bigsuds < 1.0.3, no verify param
|
||||
if validate_certs:
|
||||
# Note: verified we have SSLContext when we parsed params
|
||||
api = bigsuds.BIGIP(hostname=bigip, username=user, password=password)
|
||||
else:
|
||||
import ssl
|
||||
if hasattr(ssl, 'SSLContext'):
|
||||
# Really, you should never do this. It disables certificate
|
||||
# verification *globally*. But since older bigip libraries
|
||||
# don't give us a way to toggle verification we need to
|
||||
# disable it at the global level.
|
||||
# From https://www.python.org/dev/peps/pep-0476/#id29
|
||||
ssl._create_default_https_context = ssl._create_unverified_context
|
||||
api = bigsuds.BIGIP(hostname=bigip, username=user, password=password)
|
||||
|
||||
def disable_ssl_cert_validation():
|
||||
# You probably only want to do this for testing and never in production.
|
||||
# From https://www.python.org/dev/peps/pep-0476/#id29
|
||||
import ssl
|
||||
ssl._create_default_https_context = ssl._create_unverified_context
|
||||
return api
|
||||
|
||||
# Fully Qualified name (with the partition)
|
||||
def fq_name(partition,name):
|
||||
|
||||
@@ -119,6 +119,7 @@ class Facts(object):
|
||||
('/etc/gentoo-release', 'Gentoo'),
|
||||
('/etc/os-release', 'Debian'),
|
||||
('/etc/lsb-release', 'Mandriva'),
|
||||
('/etc/altlinux-release', 'Altlinux'),
|
||||
('/etc/os-release', 'NA'),
|
||||
)
|
||||
SELINUX_MODE_DICT = { 1: 'enforcing', 0: 'permissive', -1: 'disabled' }
|
||||
@@ -270,7 +271,7 @@ class Facts(object):
|
||||
OracleLinux = 'RedHat', OVS = 'RedHat', OEL = 'RedHat', Amazon = 'RedHat',
|
||||
XenServer = 'RedHat', Ubuntu = 'Debian', Debian = 'Debian', Raspbian = 'Debian', Slackware = 'Slackware', SLES = 'Suse',
|
||||
SLED = 'Suse', openSUSE = 'Suse', SuSE = 'Suse', SLES_SAP = 'Suse', Gentoo = 'Gentoo', Funtoo = 'Gentoo',
|
||||
Archlinux = 'Archlinux', Manjaro = 'Archlinux', Mandriva = 'Mandrake', Mandrake = 'Mandrake',
|
||||
Archlinux = 'Archlinux', Manjaro = 'Archlinux', Mandriva = 'Mandrake', Mandrake = 'Mandrake', Altlinux = 'Altlinux',
|
||||
Solaris = 'Solaris', Nexenta = 'Solaris', OmniOS = 'Solaris', OpenIndiana = 'Solaris',
|
||||
SmartOS = 'Solaris', AIX = 'AIX', Alpine = 'Alpine', MacOSX = 'Darwin',
|
||||
FreeBSD = 'FreeBSD', HPUX = 'HP-UX'
|
||||
@@ -323,7 +324,7 @@ class Facts(object):
|
||||
for (path, name) in Facts.OSDIST_LIST:
|
||||
if os.path.exists(path):
|
||||
if os.path.getsize(path) > 0:
|
||||
if self.facts['distribution'] in ('Fedora', ):
|
||||
if self.facts['distribution'] in ('Fedora', 'Altlinux', ):
|
||||
# Once we determine the value is one of these distros
|
||||
# we trust the values are always correct
|
||||
break
|
||||
@@ -356,6 +357,13 @@ class Facts(object):
|
||||
else:
|
||||
self.facts['distribution'] = data.split()[0]
|
||||
break
|
||||
elif name == 'Altlinux':
|
||||
data = get_file_content(path)
|
||||
if 'ALT Linux' in data:
|
||||
self.facts['distribution'] = name
|
||||
else:
|
||||
self.facts['distribution'] = data.split()[0]
|
||||
break
|
||||
elif name == 'OtherLinux':
|
||||
data = get_file_content(path)
|
||||
if 'Amazon' in data:
|
||||
@@ -524,7 +532,10 @@ class Facts(object):
|
||||
keytypes = ('dsa', 'rsa', 'ecdsa', 'ed25519')
|
||||
|
||||
if self.facts['system'] == 'Darwin':
|
||||
keydir = '/etc'
|
||||
if self.facts['distribution'] == 'MacOSX' and LooseVersion(self.facts['distribution_version']) >= LooseVersion('10.11') :
|
||||
keydir = '/etc/ssh'
|
||||
else:
|
||||
keydir = '/etc'
|
||||
else:
|
||||
keydir = '/etc/ssh'
|
||||
|
||||
@@ -544,21 +555,23 @@ class Facts(object):
|
||||
self.facts['pkg_mgr'] = 'openbsd_pkg'
|
||||
|
||||
def get_service_mgr_facts(self):
|
||||
#TODO: detect more custom init setups like bootscripts, dmd, s6, etc
|
||||
#TODO: detect more custom init setups like bootscripts, dmd, s6, Epoch, runit, etc
|
||||
# also other OSs other than linux might need to check across several possible candidates
|
||||
|
||||
# try various forms of querying pid 1
|
||||
proc_1 = get_file_content('/proc/1/comm')
|
||||
if proc_1 is None:
|
||||
rc, proc_1, err = module.run_command("ps -p 1 -o comm|tail -n 1", use_unsafe_shell=True)
|
||||
else:
|
||||
proc_1 = os.path.basename(proc_1)
|
||||
|
||||
if proc_1 in ['init', '/sbin/init']:
|
||||
# many systems return init, so this cannot be trusted
|
||||
if proc_1 == 'init' or proc_1.endswith('sh'):
|
||||
# many systems return init, so this cannot be trusted, if it ends in 'sh' it probalby is a shell in a container
|
||||
proc_1 = None
|
||||
|
||||
# if not init/None it should be an identifiable or custom init, so we are done!
|
||||
if proc_1 is not None:
|
||||
self.facts['service_mgr'] = proc_1
|
||||
self.facts['service_mgr'] = proc_1.strip()
|
||||
|
||||
# start with the easy ones
|
||||
elif self.facts['distribution'] == 'MacOSX':
|
||||
@@ -567,7 +580,7 @@ class Facts(object):
|
||||
self.facts['service_mgr'] = 'launchd'
|
||||
else:
|
||||
self.facts['service_mgr'] = 'systemstarter'
|
||||
elif self.facts['system'].endswith('BSD') or self.facts['system'] in ['Bitrig', 'DragonFly']:
|
||||
elif 'BSD' in self.facts['system'] or self.facts['system'] in ['Bitrig', 'DragonFly']:
|
||||
#FIXME: we might want to break out to individual BSDs
|
||||
self.facts['service_mgr'] = 'bsdinit'
|
||||
elif self.facts['system'] == 'AIX':
|
||||
@@ -576,12 +589,11 @@ class Facts(object):
|
||||
#FIXME: smf?
|
||||
self.facts['service_mgr'] = 'svcs'
|
||||
elif self.facts['system'] == 'Linux':
|
||||
|
||||
if self._check_systemd():
|
||||
self.facts['service_mgr'] = 'systemd'
|
||||
elif module.get_bin_path('initctl') and os.path.exists("/etc/init/"):
|
||||
self.facts['service_mgr'] = 'upstart'
|
||||
elif module.get_bin_path('rc-service'):
|
||||
elif os.path.realpath('/sbin/rc') == '/sbin/openrc':
|
||||
self.facts['service_mgr'] = 'openrc'
|
||||
elif os.path.exists('/etc/init.d/'):
|
||||
self.facts['service_mgr'] = 'sysvinit'
|
||||
@@ -2971,14 +2983,19 @@ def get_file_content(path, default=None, strip=True):
|
||||
data = default
|
||||
if os.path.exists(path) and os.access(path, os.R_OK):
|
||||
try:
|
||||
datafile = open(path)
|
||||
data = datafile.read()
|
||||
if strip:
|
||||
data = data.strip()
|
||||
if len(data) == 0:
|
||||
data = default
|
||||
finally:
|
||||
datafile.close()
|
||||
try:
|
||||
datafile = open(path)
|
||||
data = datafile.read()
|
||||
if strip:
|
||||
data = data.strip()
|
||||
if len(data) == 0:
|
||||
data = default
|
||||
finally:
|
||||
datafile.close()
|
||||
except:
|
||||
# ignore errors as some jails/containers might have readable permissions but not allow reads to proc
|
||||
# done in 2 blocks for 2.4 compat
|
||||
pass
|
||||
return data
|
||||
|
||||
def get_file_lines(path):
|
||||
|
||||
134
lib/ansible/module_utils/ios.py
Normal file
134
lib/ansible/module_utils/ios.py
Normal file
@@ -0,0 +1,134 @@
|
||||
#
|
||||
# (c) 2015 Peter Sprygada, <psprygada@ansible.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
|
||||
NET_PASSWD_RE = re.compile(r"[\r\n]?password: $", re.I)
|
||||
|
||||
NET_COMMON_ARGS = dict(
|
||||
host=dict(required=True),
|
||||
port=dict(default=22, type='int'),
|
||||
username=dict(required=True),
|
||||
password=dict(no_log=True),
|
||||
authorize=dict(default=False, type='bool'),
|
||||
auth_pass=dict(no_log=True),
|
||||
provider=dict()
|
||||
)
|
||||
|
||||
def to_list(val):
|
||||
if isinstance(val, (list, tuple)):
|
||||
return list(val)
|
||||
elif val is not None:
|
||||
return [val]
|
||||
else:
|
||||
return list()
|
||||
|
||||
class Cli(object):
|
||||
|
||||
def __init__(self, module):
|
||||
self.module = module
|
||||
self.shell = None
|
||||
|
||||
def connect(self, **kwargs):
|
||||
host = self.module.params['host']
|
||||
port = self.module.params['port'] or 22
|
||||
|
||||
username = self.module.params['username']
|
||||
password = self.module.params['password']
|
||||
|
||||
self.shell = Shell()
|
||||
self.shell.open(host, port=port, username=username, password=password)
|
||||
|
||||
def authorize(self):
|
||||
passwd = self.module.params['auth_pass']
|
||||
self.send(Command('enable', prompt=NET_PASSWD_RE, response=passwd))
|
||||
|
||||
def send(self, commands):
|
||||
return self.shell.send(commands)
|
||||
|
||||
class NetworkModule(AnsibleModule):
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(NetworkModule, self).__init__(*args, **kwargs)
|
||||
self.connection = None
|
||||
self._config = None
|
||||
|
||||
@property
|
||||
def config(self):
|
||||
if not self._config:
|
||||
self._config = self.get_config()
|
||||
return self._config
|
||||
|
||||
def _load_params(self):
|
||||
params = super(NetworkModule, self)._load_params()
|
||||
provider = params.get('provider') or dict()
|
||||
for key, value in provider.items():
|
||||
if key in NET_COMMON_ARGS.keys():
|
||||
params[key] = value
|
||||
return params
|
||||
|
||||
def connect(self):
|
||||
try:
|
||||
self.connection = Cli(self)
|
||||
self.connection.connect()
|
||||
self.execute('terminal length 0')
|
||||
|
||||
if self.params['authorize']:
|
||||
self.connection.authorize()
|
||||
|
||||
except Exception, exc:
|
||||
self.fail_json(msg=exc.message)
|
||||
|
||||
def configure(self, commands):
|
||||
commands = to_list(commands)
|
||||
commands.insert(0, 'configure terminal')
|
||||
responses = self.execute(commands)
|
||||
responses.pop(0)
|
||||
return responses
|
||||
|
||||
def execute(self, commands, **kwargs):
|
||||
return self.connection.send(commands)
|
||||
|
||||
def disconnect(self):
|
||||
self.connection.close()
|
||||
|
||||
def parse_config(self, cfg):
|
||||
return parse(cfg, indent=1)
|
||||
|
||||
def get_config(self):
|
||||
cmd = 'show running-config'
|
||||
if self.params.get('include_defaults'):
|
||||
cmd += ' all'
|
||||
return self.execute(cmd)[0]
|
||||
|
||||
def get_module(**kwargs):
|
||||
"""Return instance of NetworkModule
|
||||
"""
|
||||
argument_spec = NET_COMMON_ARGS.copy()
|
||||
if kwargs.get('argument_spec'):
|
||||
argument_spec.update(kwargs['argument_spec'])
|
||||
kwargs['argument_spec'] = argument_spec
|
||||
|
||||
module = NetworkModule(**kwargs)
|
||||
|
||||
# HAS_PARAMIKO is set by module_utils/shell.py
|
||||
if not HAS_PARAMIKO:
|
||||
module.fail_json(msg='paramiko is required but does not appear to be installed')
|
||||
|
||||
module.connect()
|
||||
return module
|
||||
|
||||
122
lib/ansible/module_utils/iosxr.py
Normal file
122
lib/ansible/module_utils/iosxr.py
Normal file
@@ -0,0 +1,122 @@
|
||||
#
|
||||
# (c) 2015 Peter Sprygada, <psprygada@ansible.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
|
||||
NET_PASSWD_RE = re.compile(r"[\r\n]?password: $", re.I)
|
||||
|
||||
NET_COMMON_ARGS = dict(
|
||||
host=dict(required=True),
|
||||
port=dict(default=22, type='int'),
|
||||
username=dict(required=True),
|
||||
password=dict(no_log=True),
|
||||
provider=dict()
|
||||
)
|
||||
|
||||
def to_list(val):
|
||||
if isinstance(val, (list, tuple)):
|
||||
return list(val)
|
||||
elif val is not None:
|
||||
return [val]
|
||||
else:
|
||||
return list()
|
||||
|
||||
class Cli(object):
|
||||
|
||||
def __init__(self, module):
|
||||
self.module = module
|
||||
self.shell = None
|
||||
|
||||
def connect(self, **kwargs):
|
||||
host = self.module.params['host']
|
||||
port = self.module.params['port'] or 22
|
||||
|
||||
username = self.module.params['username']
|
||||
password = self.module.params['password']
|
||||
|
||||
self.shell = Shell()
|
||||
self.shell.open(host, port=port, username=username, password=password)
|
||||
|
||||
def send(self, commands):
|
||||
return self.shell.send(commands)
|
||||
|
||||
class NetworkModule(AnsibleModule):
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(NetworkModule, self).__init__(*args, **kwargs)
|
||||
self.connection = None
|
||||
self._config = None
|
||||
|
||||
@property
|
||||
def config(self):
|
||||
if not self._config:
|
||||
self._config = self.get_config()
|
||||
return self._config
|
||||
|
||||
def _load_params(self):
|
||||
params = super(NetworkModule, self)._load_params()
|
||||
provider = params.get('provider') or dict()
|
||||
for key, value in provider.items():
|
||||
if key in NET_COMMON_ARGS.keys():
|
||||
params[key] = value
|
||||
return params
|
||||
|
||||
def connect(self):
|
||||
try:
|
||||
self.connection = Cli(self)
|
||||
self.connection.connect()
|
||||
self.execute('terminal length 0')
|
||||
except Exception, exc:
|
||||
self.fail_json(msg=exc.message)
|
||||
|
||||
def configure(self, commands):
|
||||
commands = to_list(commands)
|
||||
commands.insert(0, 'configure terminal')
|
||||
commands.append('commit')
|
||||
responses = self.execute(commands)
|
||||
responses.pop(0)
|
||||
responses.pop()
|
||||
return responses
|
||||
|
||||
def execute(self, commands, **kwargs):
|
||||
return self.connection.send(commands)
|
||||
|
||||
def disconnect(self):
|
||||
self.connection.close()
|
||||
|
||||
def parse_config(self, cfg):
|
||||
return parse(cfg, indent=1)
|
||||
|
||||
def get_config(self):
|
||||
return self.execute('show running-config')[0]
|
||||
|
||||
def get_module(**kwargs):
|
||||
"""Return instance of NetworkModule
|
||||
"""
|
||||
argument_spec = NET_COMMON_ARGS.copy()
|
||||
if kwargs.get('argument_spec'):
|
||||
argument_spec.update(kwargs['argument_spec'])
|
||||
kwargs['argument_spec'] = argument_spec
|
||||
|
||||
module = NetworkModule(**kwargs)
|
||||
|
||||
if not HAS_PARAMIKO:
|
||||
module.fail_json(msg='paramiko is required but does not appear to be installed')
|
||||
|
||||
module.connect()
|
||||
return module
|
||||
|
||||
122
lib/ansible/module_utils/junos.py
Normal file
122
lib/ansible/module_utils/junos.py
Normal file
@@ -0,0 +1,122 @@
|
||||
#
|
||||
# (c) 2015 Peter Sprygada, <psprygada@ansible.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
|
||||
NET_COMMON_ARGS = dict(
|
||||
host=dict(required=True),
|
||||
port=dict(default=22, type='int'),
|
||||
username=dict(required=True),
|
||||
password=dict(no_log=True),
|
||||
provider=dict()
|
||||
)
|
||||
|
||||
def to_list(val):
|
||||
if isinstance(val, (list, tuple)):
|
||||
return list(val)
|
||||
elif val is not None:
|
||||
return [val]
|
||||
else:
|
||||
return list()
|
||||
|
||||
class Cli(object):
|
||||
|
||||
def __init__(self, module):
|
||||
self.module = module
|
||||
self.shell = None
|
||||
|
||||
def connect(self, **kwargs):
|
||||
host = self.module.params['host']
|
||||
port = self.module.params['port'] or 22
|
||||
|
||||
username = self.module.params['username']
|
||||
password = self.module.params['password']
|
||||
|
||||
self.shell = Shell()
|
||||
self.shell.open(host, port=port, username=username, password=password)
|
||||
|
||||
def send(self, commands):
|
||||
return self.shell.send(commands)
|
||||
|
||||
|
||||
class NetworkModule(AnsibleModule):
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(NetworkModule, self).__init__(*args, **kwargs)
|
||||
self.connection = None
|
||||
self._config = None
|
||||
|
||||
@property
|
||||
def config(self):
|
||||
if not self._config:
|
||||
self._config = self.get_config()
|
||||
return self._config
|
||||
|
||||
def _load_params(self):
|
||||
params = super(NetworkModule, self)._load_params()
|
||||
provider = params.get('provider') or dict()
|
||||
for key, value in provider.items():
|
||||
if key in NET_COMMON_ARGS.keys():
|
||||
params[key] = value
|
||||
return params
|
||||
|
||||
def connect(self):
|
||||
self.connection = Cli(self)
|
||||
self.connection.connect()
|
||||
self.execute('cli')
|
||||
self.execute('set cli screen-length 0')
|
||||
|
||||
def configure(self, commands):
|
||||
commands = to_list(commands)
|
||||
commands.insert(0, 'configure')
|
||||
commands.append('commit and-quit')
|
||||
responses = self.execute(commands)
|
||||
responses.pop(0)
|
||||
responses.pop()
|
||||
return responses
|
||||
|
||||
def execute(self, commands, **kwargs):
|
||||
return self.connection.send(commands)
|
||||
|
||||
def disconnect(self):
|
||||
self.connection.close()
|
||||
|
||||
def parse_config(self, cfg):
|
||||
return parse(cfg, indent=4)
|
||||
|
||||
def get_config(self):
|
||||
cmd = 'show configuration'
|
||||
return self.execute(cmd)[0]
|
||||
|
||||
def get_module(**kwargs):
|
||||
"""Return instance of NetworkModule
|
||||
"""
|
||||
argument_spec = NET_COMMON_ARGS.copy()
|
||||
if kwargs.get('argument_spec'):
|
||||
argument_spec.update(kwargs['argument_spec'])
|
||||
kwargs['argument_spec'] = argument_spec
|
||||
kwargs['check_invalid_arguments'] = False
|
||||
|
||||
module = NetworkModule(**kwargs)
|
||||
|
||||
# HAS_PARAMIKO is set by module_utils/shell.py
|
||||
if not HAS_PARAMIKO:
|
||||
module.fail_json(msg='paramiko is required but does not appear to be installed')
|
||||
|
||||
module.connect()
|
||||
return module
|
||||
|
||||
@@ -28,7 +28,11 @@
|
||||
|
||||
import os
|
||||
import hmac
|
||||
import urlparse
|
||||
|
||||
try:
|
||||
import urlparse
|
||||
except ImportError:
|
||||
import urllib.parse as urlparse
|
||||
|
||||
try:
|
||||
from hashlib import sha1
|
||||
@@ -74,12 +78,12 @@ def get_fqdn(repo_url):
|
||||
if "@" in repo_url and "://" not in repo_url:
|
||||
# most likely an user@host:path or user@host/path type URL
|
||||
repo_url = repo_url.split("@", 1)[1]
|
||||
if ":" in repo_url:
|
||||
repo_url = repo_url.split(":")[0]
|
||||
result = repo_url
|
||||
if repo_url.startswith('['):
|
||||
result = repo_url.split(']', 1)[0] + ']'
|
||||
elif ":" in repo_url:
|
||||
result = repo_url.split(":")[0]
|
||||
elif "/" in repo_url:
|
||||
repo_url = repo_url.split("/")[0]
|
||||
result = repo_url
|
||||
result = repo_url.split("/")[0]
|
||||
elif "://" in repo_url:
|
||||
# this should be something we can parse with urlparse
|
||||
parts = urlparse.urlparse(repo_url)
|
||||
@@ -87,11 +91,13 @@ def get_fqdn(repo_url):
|
||||
# ensure we actually have a parts[1] before continuing.
|
||||
if parts[1] != '':
|
||||
result = parts[1]
|
||||
if ":" in result:
|
||||
result = result.split(":")[0]
|
||||
if "@" in result:
|
||||
result = result.split("@", 1)[1]
|
||||
|
||||
if result[0].startswith('['):
|
||||
result = result.split(']', 1)[0] + ']'
|
||||
elif ":" in result:
|
||||
result = result.split(":")[0]
|
||||
return result
|
||||
|
||||
def check_hostkey(module, fqdn):
|
||||
@@ -169,7 +175,7 @@ def add_host_key(module, fqdn, key_type="rsa", create_dir=False):
|
||||
if not os.path.exists(user_ssh_dir):
|
||||
if create_dir:
|
||||
try:
|
||||
os.makedirs(user_ssh_dir, 0700)
|
||||
os.makedirs(user_ssh_dir, int('700', 8))
|
||||
except:
|
||||
module.fail_json(msg="failed to create host key directory: %s" % user_ssh_dir)
|
||||
else:
|
||||
|
||||
66
lib/ansible/module_utils/mysql.py
Normal file
66
lib/ansible/module_utils/mysql.py
Normal file
@@ -0,0 +1,66 @@
|
||||
# This code is part of Ansible, but is an independent component.
|
||||
# This particular file snippet, and this file snippet only, is BSD licensed.
|
||||
# Modules you write using this snippet, which is embedded dynamically by Ansible
|
||||
# still belong to the author of the module, and may assign their own license
|
||||
# to the complete work.
|
||||
#
|
||||
# Copyright (c), Jonathan Mainguy <jon@soh.re>, 2015
|
||||
# Most of this was originally added by Sven Schliesing @muffl0n in the mysql_user.py module
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without modification,
|
||||
# are permitted provided that the following conditions are met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
||||
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
|
||||
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
|
||||
|
||||
def mysql_connect(module, login_user=None, login_password=None, config_file='', ssl_cert=None, ssl_key=None, ssl_ca=None, db=None, cursor_class=None):
|
||||
config = {
|
||||
'host': module.params['login_host'],
|
||||
'ssl': {
|
||||
}
|
||||
}
|
||||
|
||||
if module.params['login_unix_socket']:
|
||||
config['unix_socket'] = module.params['login_unix_socket']
|
||||
else:
|
||||
config['port'] = module.params['login_port']
|
||||
|
||||
if os.path.exists(config_file):
|
||||
config['read_default_file'] = config_file
|
||||
|
||||
# If login_user or login_password are given, they should override the
|
||||
# config file
|
||||
if login_user is not None:
|
||||
config['user'] = login_user
|
||||
if login_password is not None:
|
||||
config['passwd'] = login_password
|
||||
if ssl_cert is not None:
|
||||
config['ssl']['cert'] = ssl_cert
|
||||
if ssl_key is not None:
|
||||
config['ssl']['key'] = ssl_key
|
||||
if ssl_ca is not None:
|
||||
config['ssl']['ca'] = ssl_ca
|
||||
if db is not None:
|
||||
config['db'] = db
|
||||
|
||||
db_connection = MySQLdb.connect(**config)
|
||||
if cursor_class is not None:
|
||||
return db_connection.cursor(cursorclass=MySQLdb.cursors.DictCursor)
|
||||
else:
|
||||
return db_connection.cursor()
|
||||
85
lib/ansible/module_utils/netcfg.py
Normal file
85
lib/ansible/module_utils/netcfg.py
Normal file
@@ -0,0 +1,85 @@
|
||||
#
|
||||
# (c) 2015 Peter Sprygada, <psprygada@ansible.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
|
||||
import re
|
||||
import collections
|
||||
|
||||
class ConfigLine(object):
|
||||
|
||||
def __init__(self, text):
|
||||
self.text = text
|
||||
self.children = list()
|
||||
self.parents = list()
|
||||
self.raw = None
|
||||
|
||||
def __str__(self):
|
||||
return self.raw
|
||||
|
||||
def __eq__(self, other):
|
||||
if self.text == other.text:
|
||||
return self.parents == other.parents
|
||||
|
||||
def __ne__(self, other):
|
||||
return not self.__eq__(other)
|
||||
|
||||
def parse(lines, indent):
|
||||
toplevel = re.compile(r'\S')
|
||||
childline = re.compile(r'^\s*(.+)$')
|
||||
repl = r'([{|}|;])'
|
||||
|
||||
ancestors = list()
|
||||
config = list()
|
||||
|
||||
for line in str(lines).split('\n'):
|
||||
text = str(re.sub(repl, '', line)).strip()
|
||||
|
||||
cfg = ConfigLine(text)
|
||||
cfg.raw = line
|
||||
|
||||
if not text or text[0] in ['!', '#']:
|
||||
continue
|
||||
|
||||
# handle top level commands
|
||||
if toplevel.match(line):
|
||||
ancestors = [cfg]
|
||||
|
||||
# handle sub level commands
|
||||
else:
|
||||
match = childline.match(line)
|
||||
line_indent = match.start(1)
|
||||
level = int(line_indent / indent)
|
||||
parent_level = level - 1
|
||||
|
||||
cfg.parents = ancestors[:level]
|
||||
|
||||
if level > len(ancestors):
|
||||
config.append(cfg)
|
||||
continue
|
||||
|
||||
for i in range(level, len(ancestors)):
|
||||
ancestors.pop()
|
||||
|
||||
ancestors.append(cfg)
|
||||
ancestors[parent_level].children.append(cfg)
|
||||
|
||||
config.append(cfg)
|
||||
|
||||
return config
|
||||
|
||||
|
||||
@@ -1,130 +0,0 @@
|
||||
#
|
||||
# (c) 2015 Peter Sprygada, <psprygada@ansible.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
"""
|
||||
This module adds support for Cisco NXAPI to Ansible shared
|
||||
module_utils. It builds on module_utils/urls.py to provide
|
||||
NXAPI support over HTTP/S which is required for proper operation.
|
||||
|
||||
In order to use this module, include it as part of a custom
|
||||
module as shown below.
|
||||
|
||||
** Note: The order of the import statements does matter. **
|
||||
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.urls import *
|
||||
from ansible.module_utils.nxapi import *
|
||||
|
||||
The nxapi module provides the following common argument spec:
|
||||
|
||||
* host (str) - [Required] The IPv4 address or FQDN of the network device
|
||||
|
||||
* port (str) - Overrides the default port to use for the HTTP/S
|
||||
connection. The default values are 80 for HTTP and
|
||||
443 for HTTPS
|
||||
|
||||
* url_username (str) - [Required] The username to use to authenticate
|
||||
the HTTP/S connection. Aliases: username
|
||||
|
||||
* url_password (str) - [Required] The password to use to authenticate
|
||||
the HTTP/S connection. Aliases: password
|
||||
|
||||
* use_ssl (bool) - Specifies whether or not to use an encrypted (HTTPS)
|
||||
connection or not. The default value is False.
|
||||
|
||||
* command_type (str) - The type of command to send to the remote
|
||||
device. Valid values in `cli_show`, `cli_show_ascii`, 'cli_conf`
|
||||
and `bash`. The default value is `cli_show_ascii`
|
||||
|
||||
In order to communicate with Cisco NXOS devices, the NXAPI feature
|
||||
must be enabled and configured on the device.
|
||||
|
||||
"""
|
||||
|
||||
NXAPI_COMMAND_TYPES = ['cli_show', 'cli_show_ascii', 'cli_conf', 'bash']
|
||||
|
||||
def nxapi_argument_spec(spec=None):
|
||||
"""Creates an argument spec for working with NXAPI
|
||||
"""
|
||||
arg_spec = url_argument_spec()
|
||||
arg_spec.update(dict(
|
||||
host=dict(required=True),
|
||||
port=dict(),
|
||||
url_username=dict(required=True, aliases=['username']),
|
||||
url_password=dict(required=True, aliases=['password']),
|
||||
use_ssl=dict(default=False, type='bool'),
|
||||
command_type=dict(default='cli_show_ascii', choices=NXAPI_COMMAND_TYPES)
|
||||
))
|
||||
if spec:
|
||||
arg_spec.update(spec)
|
||||
return arg_spec
|
||||
|
||||
def nxapi_url(module):
|
||||
"""Constructs a valid NXAPI url
|
||||
"""
|
||||
if module.params['use_ssl']:
|
||||
proto = 'https'
|
||||
else:
|
||||
proto = 'http'
|
||||
host = module.params['host']
|
||||
url = '{}://{}'.format(proto, host)
|
||||
port = module.params['port']
|
||||
if module.params['port']:
|
||||
url = '{}:{}'.format(url, module.params['port'])
|
||||
url = '{}/ins'.format(url)
|
||||
return url
|
||||
|
||||
def nxapi_body(commands, command_type, **kwargs):
|
||||
"""Encodes a NXAPI JSON request message
|
||||
"""
|
||||
if isinstance(commands, (list, set, tuple)):
|
||||
commands = ' ;'.join(commands)
|
||||
|
||||
msg = {
|
||||
'version': kwargs.get('version') or '1.2',
|
||||
'type': command_type,
|
||||
'chunk': kwargs.get('chunk') or '0',
|
||||
'sid': kwargs.get('sid'),
|
||||
'input': commands,
|
||||
'output_format': 'json'
|
||||
}
|
||||
|
||||
return dict(ins_api=msg)
|
||||
|
||||
def nxapi_command(module, commands, command_type=None, **kwargs):
|
||||
"""Sends the list of commands to the device over NXAPI
|
||||
"""
|
||||
url = nxapi_url(module)
|
||||
|
||||
command_type = command_type or module.params['command_type']
|
||||
|
||||
data = nxapi_body(commands, command_type)
|
||||
data = module.jsonify(data)
|
||||
|
||||
headers = {'Content-Type': 'text/json'}
|
||||
|
||||
response, headers = fetch_url(module, url, data=data, headers=headers,
|
||||
method='POST')
|
||||
|
||||
status = kwargs.get('status') or 200
|
||||
if headers['status'] != status:
|
||||
module.fail_json(**headers)
|
||||
|
||||
response = module.from_json(response.read())
|
||||
return response, headers
|
||||
|
||||
217
lib/ansible/module_utils/nxos.py
Normal file
217
lib/ansible/module_utils/nxos.py
Normal file
@@ -0,0 +1,217 @@
|
||||
#
|
||||
# (c) 2015 Peter Sprygada, <psprygada@ansible.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
NET_PASSWD_RE = re.compile(r"[\r\n]?password: $", re.I)
|
||||
|
||||
NET_COMMON_ARGS = dict(
|
||||
host=dict(required=True),
|
||||
port=dict(type='int'),
|
||||
username=dict(required=True),
|
||||
password=dict(no_log=True),
|
||||
transport=dict(choices=['cli', 'nxapi']),
|
||||
use_ssl=dict(default=False, type='bool'),
|
||||
provider=dict()
|
||||
)
|
||||
|
||||
NXAPI_COMMAND_TYPES = ['cli_show', 'cli_show_ascii', 'cli_conf', 'bash']
|
||||
NXAPI_ENCODINGS = ['json', 'xml']
|
||||
|
||||
def to_list(val):
|
||||
if isinstance(val, (list, tuple)):
|
||||
return list(val)
|
||||
elif val is not None:
|
||||
return [val]
|
||||
else:
|
||||
return list()
|
||||
|
||||
class Nxapi(object):
|
||||
|
||||
def __init__(self, module):
|
||||
self.module = module
|
||||
|
||||
# sets the module_utils/urls.py req parameters
|
||||
self.module.params['url_username'] = module.params['username']
|
||||
self.module.params['url_password'] = module.params['password']
|
||||
|
||||
self.url = None
|
||||
self.enable = None
|
||||
|
||||
def _get_body(self, commands, command_type, encoding, version='1.2', chunk='0', sid=None):
|
||||
"""Encodes a NXAPI JSON request message
|
||||
"""
|
||||
if isinstance(commands, (list, set, tuple)):
|
||||
commands = ' ;'.join(commands)
|
||||
|
||||
if encoding not in NXAPI_ENCODINGS:
|
||||
self.module.fail_json("Invalid encoding. Received %s. Expected one of %s" %
|
||||
(encoding, ','.join(NXAPI_ENCODINGS)))
|
||||
|
||||
msg = {
|
||||
'version': version,
|
||||
'type': command_type,
|
||||
'chunk': chunk,
|
||||
'sid': sid,
|
||||
'input': commands,
|
||||
'output_format': encoding
|
||||
}
|
||||
return dict(ins_api=msg)
|
||||
|
||||
def connect(self):
|
||||
host = self.module.params['host']
|
||||
port = self.module.params['port']
|
||||
|
||||
if self.module.params['use_ssl']:
|
||||
proto = 'https'
|
||||
if not port:
|
||||
port = 443
|
||||
else:
|
||||
proto = 'http'
|
||||
if not port:
|
||||
port = 80
|
||||
|
||||
self.url = '%s://%s:%s/ins' % (proto, host, port)
|
||||
|
||||
def send(self, commands, command_type='cli_show_ascii', encoding='json'):
|
||||
"""Send commands to the device.
|
||||
"""
|
||||
clist = to_list(commands)
|
||||
|
||||
if command_type not in NXAPI_COMMAND_TYPES:
|
||||
self.module.fail_json(msg="Invalid command_type. Received %s. Expected one of %s." %
|
||||
(command_type, ','.join(NXAPI_COMMAND_TYPES)))
|
||||
|
||||
data = self._get_body(clist, command_type, encoding)
|
||||
data = self.module.jsonify(data)
|
||||
|
||||
headers = {'Content-Type': 'application/json'}
|
||||
|
||||
response, headers = fetch_url(self.module, self.url, data=data, headers=headers,
|
||||
method='POST')
|
||||
|
||||
if headers['status'] != 200:
|
||||
self.module.fail_json(**headers)
|
||||
|
||||
response = self.module.from_json(response.read())
|
||||
if 'error' in response:
|
||||
err = response['error']
|
||||
self.module.fail_json(msg='json-rpc error % ' % str(err))
|
||||
|
||||
return response
|
||||
|
||||
class Cli(object):
    """CLI transport that drives a paramiko-backed Shell session."""

    def __init__(self, module):
        self.module = module
        self.shell = None  # created lazily by connect()

    def connect(self, **kwargs):
        """Open an interactive SSH shell using the module's params."""
        params = self.module.params
        self.shell = Shell()
        self.shell.open(
            params['host'],
            port=params['port'] or 22,
            username=params['username'],
            password=params['password'],
        )

    def send(self, commands, encoding='text'):
        """Run commands on the open shell.

        The ``encoding`` argument exists for API symmetry with the NXAPI
        transport and is ignored here.
        """
        return self.shell.send(commands)
|
||||
|
||||
class NetworkModule(AnsibleModule):
    """AnsibleModule subclass that manages a connection to an NX-OS device.

    Dispatches to an Nxapi or Cli transport based on the 'transport'
    module parameter and exposes helpers to read and push configuration.
    """

    def __init__(self, *args, **kwargs):
        super(NetworkModule, self).__init__(*args, **kwargs)
        self.connection = None  # Nxapi or Cli instance, set by connect()
        self._config = None     # lazy cache of the running configuration

    @property
    def config(self):
        """Running configuration, fetched once and cached."""
        if not self._config:
            self._config = self.get_config()
        return self._config

    def _load_params(self):
        """Promote values from the 'provider' dict to top-level params.

        Only keys present in NET_COMMON_ARGS are copied, so a provider
        dict cannot inject arbitrary parameters.
        """
        params = super(NetworkModule, self)._load_params()
        provider = params.get('provider') or dict()
        for key, value in provider.items():
            if key in NET_COMMON_ARGS.keys():
                params[key] = value
        return params

    def connect(self):
        """Open the transport connection and disable terminal paging."""
        if self.params['transport'] == 'nxapi':
            self.connection = Nxapi(self)
        else:
            self.connection = Cli(self)

        try:
            self.connection.connect()
            self.execute('terminal length 0')
        except Exception as exc:
            # 'except ... as ...' replaces the Python-2-only comma form;
            # str(exc) replaces exc.message, which is deprecated in 2.6+
            # and removed in Python 3.
            self.fail_json(msg=str(exc))

    def configure(self, commands):
        """Push configuration commands and return the device responses.

        For the CLI transport, 'configure terminal' is prepended and its
        response discarded so callers only see their own commands' output.
        """
        commands = to_list(commands)
        if self.params['transport'] == 'cli':
            commands.insert(0, 'configure terminal')
            responses = self.execute(commands)
            responses.pop(0)
        else:
            responses = self.execute(commands, command_type='cli_conf')
        return responses

    def execute(self, commands, **kwargs):
        """Send arbitrary commands over the active connection."""
        try:
            return self.connection.send(commands, **kwargs)
        except Exception as exc:
            self.fail_json(msg=str(exc))

    def disconnect(self):
        """Close the underlying transport connection."""
        self.connection.close()

    def parse_config(self, cfg):
        """Parse a configuration blob into structured config objects."""
        return parse(cfg, indent=2)

    def get_config(self):
        """Return the device's running configuration as text."""
        cmd = 'show running-config'
        if self.params.get('include_defaults'):
            cmd += ' all'
        if self.params['transport'] == 'cli':
            return self.execute(cmd)[0]
        else:
            resp = self.execute(cmd)
            if not resp.get('ins_api').get('outputs').get('output').get('body'):
                self.fail_json(msg="Unrecognized response: %s" % str(resp))
            return resp['ins_api']['outputs']['output']['body']
|
||||
|
||||
def get_module(**kwargs):
    """Build, connect, and return a NetworkModule instance.

    Merges the caller-supplied argument_spec on top of the shared
    NET_COMMON_ARGS before instantiating the module.
    """
    spec = NET_COMMON_ARGS.copy()
    extra = kwargs.get('argument_spec')
    if extra:
        spec.update(extra)
    kwargs['argument_spec'] = spec

    module = NetworkModule(**kwargs)

    # HAS_PARAMIKO is set by module_utils/shell.py
    if not HAS_PARAMIKO and module.params['transport'] == 'cli':
        module.fail_json(msg='paramiko is required but does not appear to be installed')

    module.connect()
    return module
|
||||
247
lib/ansible/module_utils/openswitch.py
Normal file
247
lib/ansible/module_utils/openswitch.py
Normal file
@@ -0,0 +1,247 @@
|
||||
#
|
||||
# (c) 2015 Peter Sprygada, <psprygada@ansible.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
import json
import re
import time
|
||||
|
||||
try:
|
||||
from runconfig import runconfig
|
||||
from opsrest.settings import settings
|
||||
from opsrest.manager import OvsdbConnectionManager
|
||||
from opslib import restparser
|
||||
HAS_OPS = True
|
||||
except ImportError:
|
||||
HAS_OPS = False
|
||||
|
||||
# Matches a trailing interactive 'password:' prompt in captured output.
# NOTE(review): 're' is not imported by this module's visible imports
# (only time/json), so this line raises NameError at import — add 'import re'.
NET_PASSWD_RE = re.compile(r"[\r\n]?password: $", re.I)
|
||||
|
||||
# Connection parameters shared by all OpenSwitch network modules; merged
# into each module's argument_spec by get_module().
NET_COMMON_ARGS = dict(
    host=dict(),
    port=dict(type='int'),
    username=dict(),
    password=dict(no_log=True),
    # NOTE(review): used as a boolean by the transports but declared
    # type='int' — presumably intentional truthy handling; confirm.
    use_ssl=dict(default=True, type='int'),
    transport=dict(default='ssh', choices=['ssh', 'cli', 'rest']),
    provider=dict()
)
|
||||
|
||||
def to_list(val):
    """Normalize *val* to a list.

    Lists and tuples are copied, None becomes an empty list, and any
    other value (including strings) is wrapped in a single-item list.
    """
    if isinstance(val, (list, tuple)):
        return list(val)
    if val is None:
        return list()
    return [val]
|
||||
|
||||
def get_idl():
    """Start an OVSDB connection manager and return its IDL handle once
    the initial database sync has completed."""
    manager = OvsdbConnectionManager(settings.get('ovs_remote'),
                                     settings.get('ovs_schema'))
    manager.start()
    idl = manager.idl

    # change_seqno advances away from its initial value (0) after the
    # first successful sync with the database.
    initial_seqno = 0
    while idl.change_seqno == initial_seqno:
        idl.run()
        time.sleep(1)

    return idl
|
||||
|
||||
def get_schema():
    """Parse and return the extended OVSDB schema configured in settings."""
    return restparser.parseSchema(settings.get('ext_schema'))
|
||||
|
||||
def get_runconfig():
    """Return a RunConfigUtil bound to a live IDL and the parsed schema."""
    live_idl = get_idl()
    parsed_schema = get_schema()
    return runconfig.RunConfigUtil(live_idl, parsed_schema)
|
||||
|
||||
class Response(object):
    """Pairs an HTTP response body with its headers dict."""

    def __init__(self, resp, hdrs):
        # Read the body eagerly; the underlying response object may not
        # be readable after the connection is released.
        self.body = resp.read()
        self.headers = hdrs

    @property
    def json(self):
        """The body decoded as JSON, or None when it is not valid JSON."""
        try:
            return json.loads(self.body)
        except ValueError:
            return None
|
||||
|
||||
class Rest(object):
    """REST transport for the OpenSwitch full-configuration API."""

    def __init__(self, module):
        self.module = module
        self.baseurl = None  # set by connect()

    def connect(self):
        """Derive the REST base URL from the module's host/port/ssl params."""
        params = self.module.params
        if params['use_ssl']:
            scheme, fallback_port = 'https', 443
        else:
            scheme, fallback_port = 'http', 80
        port = params['port'] or fallback_port
        self.baseurl = '%s://%s:%s/rest/v1' % (scheme, params['host'], port)

    def _url_builder(self, path):
        """Join *path* onto the base URL, tolerating one leading slash."""
        if path[0] == '/':
            path = path[1:]
        return '%s/%s' % (self.baseurl, path)

    def send(self, method, path, data=None, headers=None):
        """Issue an HTTP request and wrap the reply in a Response."""
        url = self._url_builder(path)
        body = self.module.jsonify(data)

        if headers is None:
            headers = dict()
        # Mutates the caller-provided headers dict, matching fetch_url usage.
        headers['Content-Type'] = 'application/json'

        resp, resp_hdrs = fetch_url(self.module, url, data=body,
                                    headers=headers, method=method)
        return Response(resp, resp_hdrs)

    def get(self, path, data=None, headers=None):
        return self.send('GET', path, data, headers)

    def put(self, path, data=None, headers=None):
        return self.send('PUT', path, data, headers)

    def post(self, path, data=None, headers=None):
        return self.send('POST', path, data, headers)

    def delete(self, path, data=None, headers=None):
        return self.send('DELETE', path, data, headers)
|
||||
|
||||
class Cli(object):
    """Interactive-shell transport backed by a paramiko Shell."""

    def __init__(self, module):
        self.module = module
        self.shell = None  # opened on demand by connect()

    def connect(self, **kwargs):
        """Open an SSH shell session to the device."""
        p = self.module.params
        host, port = p['host'], p['port'] or 22

        self.shell = Shell()
        self.shell.open(host, port=port,
                        username=p['username'],
                        password=p['password'])

    def send(self, commands, encoding='text'):
        """Run commands on the shell; ``encoding`` is accepted for API
        symmetry with the REST transport and ignored."""
        return self.shell.send(commands)
|
||||
|
||||
class NetworkModule(AnsibleModule):
    """AnsibleModule subclass for OpenSwitch devices.

    Supports three transports: 'cli' (interactive shell), 'rest' (HTTP
    full-configuration API) and 'ssh' (direct OVSDB access via the ops
    libraries).
    """

    def __init__(self, *args, **kwargs):
        super(NetworkModule, self).__init__(*args, **kwargs)
        self.connection = None   # Rest or Cli instance, set by connect()
        self._config = None      # lazy cache of the running configuration
        self._runconfig = None   # lazy RunConfigUtil for the ssh transport

    @property
    def config(self):
        """Running configuration, fetched once and cached."""
        if not self._config:
            self._config = self.get_config()
        return self._config

    def _load_params(self):
        """Promote recognized keys from the 'provider' dict to top level."""
        params = super(NetworkModule, self)._load_params()
        provider = params.get('provider') or dict()
        for key, value in provider.items():
            if key in NET_COMMON_ARGS.keys():
                params[key] = value
        return params

    def connect(self):
        """Create and open the transport connection.

        Only invoked for the 'cli' and 'rest' transports (see
        get_module); the 'ssh' transport talks to OVSDB directly and
        never calls this method.
        """
        if self.params['transport'] == 'rest':
            self.connection = Rest(self)
        elif self.params['transport'] == 'cli':
            self.connection = Cli(self)

        self.connection.connect()

    def configure(self, config):
        """Apply *config* to the device via the active transport."""
        if self.params['transport'] == 'cli':
            commands = to_list(config)
            commands.insert(0, 'configure terminal')
            responses = self.execute(commands)
            responses.pop(0)  # drop the 'configure terminal' echo
            return responses
        elif self.params['transport'] == 'rest':
            path = '/system/full-configuration'
            return self.connection.put(path, data=config)
        else:
            if not self._runconfig:
                self._runconfig = get_runconfig()
            self._runconfig.write_config_to_db(config)

    def execute(self, commands, **kwargs):
        """Send commands over the connection; fail the module on error."""
        try:
            return self.connection.send(commands, **kwargs)
        except Exception as exc:
            # 'except ... as ...' replaces the Python-2-only comma form;
            # str(exc) replaces exc.message, which is deprecated in 2.6+
            # and removed in Python 3.
            self.fail_json(msg=str(exc), commands=commands)

    def disconnect(self):
        """Close the underlying transport connection."""
        self.connection.close()

    def parse_config(self, cfg):
        """Parse a configuration blob into structured config objects."""
        return parse(cfg, indent=4)

    def get_config(self):
        """Return the running configuration for the active transport."""
        if self.params['transport'] == 'cli':
            return self.execute('show running-config')[0]

        elif self.params['transport'] == 'rest':
            resp = self.connection.get('/system/full-configuration')
            return resp.json

        else:
            if not self._runconfig:
                self._runconfig = get_runconfig()
            return self._runconfig.get_running_config()
|
||||
|
||||
|
||||
def get_module(**kwargs):
    """Build (and, for cli/rest transports, connect) a NetworkModule.

    Merges any caller-supplied argument_spec on top of NET_COMMON_ARGS.
    """
    spec = NET_COMMON_ARGS.copy()
    extra = kwargs.get('argument_spec')
    if extra:
        spec.update(extra)
    kwargs['argument_spec'] = spec

    module = NetworkModule(**kwargs)

    if not HAS_OPS and module.params['transport'] == 'ssh':
        module.fail_json(msg='could not import ops library')

    # HAS_PARAMIKO is set by module_utils/shell.py
    if module.params['transport'] == 'cli' and not HAS_PARAMIKO:
        module.fail_json(msg='paramiko is required but does not appear to be installed')

    if module.params['transport'] in ['cli', 'rest']:
        module.connect()

    return module
|
||||
|
||||
196
lib/ansible/module_utils/shell.py
Normal file
196
lib/ansible/module_utils/shell.py
Normal file
@@ -0,0 +1,196 @@
|
||||
#
|
||||
# (c) 2015 Peter Sprygada, <psprygada@ansible.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
import re
|
||||
import socket
|
||||
|
||||
from StringIO import StringIO
|
||||
|
||||
try:
|
||||
import paramiko
|
||||
HAS_PARAMIKO = True
|
||||
except ImportError:
|
||||
HAS_PARAMIKO = False
|
||||
|
||||
|
||||
# Terminal-setup escape sequence some devices emit; stripped from output.
ANSI_RE = re.compile(r'(\x1b\[\?1h\x1b=)')

# Patterns that indicate the device has printed a command prompt:
# exec-mode '>'/'#'/'%' prompts and '(config...)#' style prompts.
CLI_PROMPTS_RE = [
    re.compile(r'[\r\n]?[a-zA-Z]{1}[a-zA-Z0-9-]*[>|#|%](?:\s*)$'),
    re.compile(r'[\r\n]?[a-zA-Z]{1}[a-zA-Z0-9-]*\(.+\)#(?:\s*)$')
]

# Patterns that indicate the device rejected or failed a command.
CLI_ERRORS_RE = [
    re.compile(r"% ?Error"),
    re.compile(r"^% \w+", re.M),
    re.compile(r"% ?Bad secret"),
    re.compile(r"invalid input", re.I),
    re.compile(r"(?:incomplete|ambiguous) command", re.I),
    re.compile(r"connection timed out", re.I),
    re.compile(r"[^\r\n]+ not found", re.I),
    re.compile(r"'[^']' +returned error code: ?\d+"),
]
|
||||
|
||||
def to_list(val):
    """Coerce *val* into a list without splitting strings: lists/tuples
    are copied, None maps to [], anything else is wrapped."""
    if val is None:
        return list()
    if isinstance(val, (list, tuple)):
        return list(val)
    return [val]
|
||||
|
||||
class ShellError(Exception):
    """Raised when the remote shell reports an error or times out.

    Carries the failing command (when known) so callers can report which
    command triggered the error.
    """

    def __init__(self, msg, command=None):
        super(ShellError, self).__init__(msg)
        # .message kept for callers that read it directly (Python 2 era).
        self.message = msg
        self.command = command
|
||||
|
||||
class Command(object):
    """A CLI command plus an optional interactive prompt/response pair.

    When ``prompt`` matches in the command's output, ``response`` is
    sent back to the device (see Shell.handle_input).
    """

    def __init__(self, command, prompt=None, response=None):
        self.command = command
        self.prompt = prompt
        self.response = response

    def __str__(self):
        return self.command
|
||||
|
||||
class Shell(object):
    """Interactive SSH shell session built on paramiko.

    Sends commands, watches output for known CLI prompts to detect
    command completion, and raises ShellError on known error output.
    """

    def __init__(self):
        self.ssh = None
        self.shell = None

        # Per-instance copies so callers can extend them without
        # mutating the module-level defaults.
        self.prompts = list()
        self.prompts.extend(CLI_PROMPTS_RE)

        self.errors = list()
        self.errors.extend(CLI_ERRORS_RE)

    def open(self, host, port=22, username=None, password=None,
             timeout=10, key_filename=None, pkey=None, look_for_keys=None):
        """Connect, start an interactive shell, and consume the banner."""
        self.ssh = paramiko.SSHClient()
        self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())

        # unless explicitly set, disable look for keys if a password is
        # present. this changes the default search order paramiko implements
        if not look_for_keys:
            look_for_keys = password is None

        self.ssh.connect(host, port=port, username=username, password=password,
                         timeout=timeout, look_for_keys=look_for_keys, pkey=pkey,
                         key_filename=key_filename)

        self.shell = self.ssh.invoke_shell()
        # NOTE(review): the read timeout is fixed at 10s regardless of the
        # 'timeout' argument, which only covers the TCP connect — confirm
        # whether that is intentional before changing.
        self.shell.settimeout(10)
        self.receive()

    def strip(self, data):
        """Remove ANSI control sequences from *data*."""
        return ANSI_RE.sub('', data)

    def receive(self, cmd=None):
        """Read from the shell until a prompt (or error) is seen.

        Returns the sanitized output for *cmd*; raises ShellError (with
        .command set) when an error pattern matches.
        """
        recv = StringIO()

        while True:
            data = self.shell.recv(200)

            recv.write(data)
            # Re-examine only the tail of the buffer. Clamp to 0 so a
            # first read shorter than 200 bytes cannot produce a negative
            # seek offset (which raises ValueError on StringIO).
            recv.seek(max(recv.tell() - 200, 0))

            window = self.strip(recv.read())

            if isinstance(cmd, Command):
                self.handle_input(window, prompt=cmd.prompt,
                                  response=cmd.response)

            try:
                if self.read(window):
                    resp = self.strip(recv.getvalue())
                    return self.sanitize(cmd, resp)
            except ShellError as exc:
                # 'as' form replaces the Python-2-only comma syntax.
                exc.command = cmd
                raise

    def send(self, commands):
        """Send each command and collect its response.

        :raises ShellError: on socket timeout, carrying the last command
            that was sent.
        """
        responses = list()
        try:
            for command in to_list(commands):
                cmd = '%s\r' % str(command)
                self.shell.sendall(cmd)
                responses.append(self.receive(command))
        except socket.timeout:
            raise ShellError("timeout trying to send command", cmd)
        return responses

    def close(self):
        """Close the interactive shell channel."""
        self.shell.close()

    def handle_input(self, resp, prompt, response):
        """Answer any interactive prompts found in *resp*."""
        if not prompt or not response:
            return

        prompt = to_list(prompt)
        response = to_list(response)

        for pr, ans in zip(prompt, response):
            match = pr.search(resp)
            if match:
                cmd = '%s\r' % ans
                self.shell.sendall(cmd)

    def sanitize(self, cmd, resp):
        """Drop the echoed command and prompt lines from a response."""
        cleaned = []
        for line in resp.splitlines():
            if line.startswith(str(cmd)) or self.read(line):
                continue
            cleaned.append(line)
        return "\n".join(cleaned)

    def read(self, response):
        """Return True when *response* ends at a prompt.

        :raises ShellError: when *response* matches a known error pattern.
        """
        for regex in self.errors:
            if regex.search(response):
                raise ShellError('%s' % response)

        for regex in self.prompts:
            if regex.search(response):
                return True
|
||||
|
||||
def get_cli_connection(module):
    """Open and return an interactive shell session for *module*.

    Fails the module (and does not return) on authentication errors,
    socket errors, or connect timeouts.
    """
    host = module.params['host']
    port = module.params['port']
    if not port:
        port = 22

    username = module.params['username']
    password = module.params['password']

    try:
        # The original instantiated 'Cli', a name this module never
        # defines or imports (guaranteed NameError); Shell is the shell
        # session class provided here.
        cli = Shell()
        cli.open(host, port=port, username=username, password=password)
    except paramiko.ssh_exception.AuthenticationException as exc:
        # 'as' form replaces the Python-2-only comma syntax; str(exc)
        # replaces the deprecated exc.message.
        module.fail_json(msg=str(exc))
    except socket.error as exc:
        host = '%s:%s' % (host, port)
        module.fail_json(msg=exc.strerror, errno=exc.errno, host=host)
    except socket.timeout:
        module.fail_json(msg='socket timed out')

    return cli
|
||||
|
||||
@@ -310,36 +310,45 @@ class NoSSLError(SSLValidationError):
|
||||
"""Needed to connect to an HTTPS url but no ssl library available to verify the certificate"""
|
||||
pass
|
||||
|
||||
# Some environments (Google Compute Engine's CoreOS deploys) do not compile
|
||||
# against openssl and thus do not have any HTTPS support.
|
||||
CustomHTTPSConnection = CustomHTTPSHandler = None
|
||||
if hasattr(httplib, 'HTTPSConnection') and hasattr(urllib2, 'HTTPSHandler'):
|
||||
class CustomHTTPSConnection(httplib.HTTPSConnection):
|
||||
def __init__(self, *args, **kwargs):
|
||||
httplib.HTTPSConnection.__init__(self, *args, **kwargs)
|
||||
if HAS_SSLCONTEXT:
|
||||
self.context = create_default_context()
|
||||
if self.cert_file:
|
||||
self.context.load_cert_chain(self.cert_file, self.key_file)
|
||||
|
||||
class CustomHTTPSConnection(httplib.HTTPSConnection):
|
||||
def __init__(self, *args, **kwargs):
|
||||
httplib.HTTPSConnection.__init__(self, *args, **kwargs)
|
||||
if HAS_SSLCONTEXT:
|
||||
self.context = create_default_context()
|
||||
if self.cert_file:
|
||||
self.context.load_cert_chain(self.cert_file, self.key_file)
|
||||
def connect(self):
|
||||
"Connect to a host on a given (SSL) port."
|
||||
|
||||
def connect(self):
|
||||
"Connect to a host on a given (SSL) port."
|
||||
if hasattr(self, 'source_address'):
|
||||
sock = socket.create_connection((self.host, self.port), self.timeout, self.source_address)
|
||||
else:
|
||||
sock = socket.create_connection((self.host, self.port), self.timeout)
|
||||
|
||||
if hasattr(self, 'source_address'):
|
||||
sock = socket.create_connection((self.host, self.port), self.timeout, self.source_address)
|
||||
else:
|
||||
sock = socket.create_connection((self.host, self.port), self.timeout)
|
||||
if self._tunnel_host:
|
||||
self.sock = sock
|
||||
self._tunnel()
|
||||
if HAS_SSLCONTEXT:
|
||||
self.sock = self.context.wrap_socket(sock, server_hostname=self.host)
|
||||
else:
|
||||
self.sock = ssl.wrap_socket(sock, keyfile=self.key_file, certfile=self.cert_file, ssl_version=PROTOCOL)
|
||||
server_hostname = self.host
|
||||
# Note: self._tunnel_host is not available on py < 2.6 but this code
|
||||
# isn't used on py < 2.6 (lack of create_connection)
|
||||
if self._tunnel_host:
|
||||
self.sock = sock
|
||||
self._tunnel()
|
||||
server_hostname = self._tunnel_host
|
||||
|
||||
class CustomHTTPSHandler(urllib2.HTTPSHandler):
|
||||
if HAS_SSLCONTEXT:
|
||||
self.sock = self.context.wrap_socket(sock, server_hostname=server_hostname)
|
||||
else:
|
||||
self.sock = ssl.wrap_socket(sock, keyfile=self.key_file, certfile=self.cert_file, ssl_version=PROTOCOL)
|
||||
|
||||
def https_open(self, req):
|
||||
return self.do_open(CustomHTTPSConnection, req)
|
||||
class CustomHTTPSHandler(urllib2.HTTPSHandler):
|
||||
|
||||
https_request = urllib2.AbstractHTTPHandler.do_request_
|
||||
def https_open(self, req):
|
||||
return self.do_open(CustomHTTPSConnection, req)
|
||||
|
||||
https_request = urllib2.AbstractHTTPHandler.do_request_
|
||||
|
||||
def generic_urlparse(parts):
|
||||
'''
|
||||
@@ -373,7 +382,10 @@ def generic_urlparse(parts):
|
||||
# get the username, password, etc.
|
||||
try:
|
||||
netloc_re = re.compile(r'^((?:\w)+(?::(?:\w)+)?@)?([A-Za-z0-9.-]+)(:\d+)?$')
|
||||
(auth, hostname, port) = netloc_re.match(parts[1])
|
||||
match = netloc_re.match(parts[1])
|
||||
auth = match.group(1)
|
||||
hostname = match.group(2)
|
||||
port = match.group(3)
|
||||
if port:
|
||||
# the capture group for the port will include the ':',
|
||||
# so remove it and convert the port to an integer
|
||||
@@ -383,6 +395,8 @@ def generic_urlparse(parts):
|
||||
# and then split it up based on the first ':' found
|
||||
auth = auth[:-1]
|
||||
username, password = auth.split(':', 1)
|
||||
else:
|
||||
username = password = None
|
||||
generic_parts['username'] = username
|
||||
generic_parts['password'] = password
|
||||
generic_parts['hostname'] = hostname
|
||||
@@ -390,7 +404,7 @@ def generic_urlparse(parts):
|
||||
except:
|
||||
generic_parts['username'] = None
|
||||
generic_parts['password'] = None
|
||||
generic_parts['hostname'] = None
|
||||
generic_parts['hostname'] = parts[1]
|
||||
generic_parts['port'] = None
|
||||
return generic_parts
|
||||
|
||||
@@ -532,7 +546,8 @@ class SSLValidationHandler(urllib2.BaseHandler):
|
||||
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
||||
if https_proxy:
|
||||
proxy_parts = generic_urlparse(urlparse.urlparse(https_proxy))
|
||||
s.connect((proxy_parts.get('hostname'), proxy_parts.get('port')))
|
||||
port = proxy_parts.get('port') or 443
|
||||
s.connect((proxy_parts.get('hostname'), port))
|
||||
if proxy_parts.get('scheme') == 'http':
|
||||
s.sendall(self.CONNECT_COMMAND % (self.hostname, self.port))
|
||||
if proxy_parts.get('username'):
|
||||
@@ -542,7 +557,7 @@ class SSLValidationHandler(urllib2.BaseHandler):
|
||||
connect_result = s.recv(4096)
|
||||
self.validate_proxy_response(connect_result)
|
||||
if context:
|
||||
ssl_s = context.wrap_socket(s, server_hostname=proxy_parts.get('hostname'))
|
||||
ssl_s = context.wrap_socket(s, server_hostname=self.hostname)
|
||||
else:
|
||||
ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL)
|
||||
match_hostname(ssl_s.getpeercert(), self.hostname)
|
||||
@@ -661,8 +676,9 @@ def open_url(url, data=None, headers=None, method=None, use_proxy=True,
|
||||
handlers.append(proxyhandler)
|
||||
|
||||
# pre-2.6 versions of python cannot use the custom https
|
||||
# handler, since the socket class is lacking this method
|
||||
if hasattr(socket, 'create_connection'):
|
||||
# handler, since the socket class is lacking create_connection.
|
||||
# Some python builds lack HTTPS support.
|
||||
if hasattr(socket, 'create_connection') and CustomHTTPSHandler:
|
||||
handlers.append(CustomHTTPSHandler)
|
||||
|
||||
opener = urllib2.build_opener(*handlers)
|
||||
|
||||
@@ -35,8 +35,8 @@ class VcaError(Exception):
|
||||
|
||||
def vca_argument_spec():
|
||||
return dict(
|
||||
username=dict(),
|
||||
password=dict(),
|
||||
username=dict(type='str', aliases=['user'], required=True),
|
||||
password=dict(type='str', aliases=['pass','passwd'], required=True, no_log=True),
|
||||
org=dict(),
|
||||
service_id=dict(),
|
||||
instance_id=dict(),
|
||||
@@ -108,7 +108,10 @@ class VcaAnsibleModule(AnsibleModule):
|
||||
|
||||
def create_instance(self):
|
||||
service_type = self.params.get('service_type', DEFAULT_SERVICE_TYPE)
|
||||
host = self.params.get('host', LOGIN_HOST.get('service_type'))
|
||||
if service_type == 'vcd':
|
||||
host = self.params['host']
|
||||
else:
|
||||
host = LOGIN_HOST[service_type]
|
||||
username = self.params['username']
|
||||
|
||||
version = self.params.get('api_version')
|
||||
|
||||
Submodule lib/ansible/modules/core updated: 572771d0b1...09e2457eb0
Submodule lib/ansible/modules/extras updated: e5362cc76a...e8427cb32a
@@ -52,9 +52,7 @@ class DataLoader():
|
||||
Usage:
|
||||
|
||||
dl = DataLoader()
|
||||
(or)
|
||||
dl = DataLoader(vault_password='foo')
|
||||
|
||||
# optionally: dl.set_vault_password('foo')
|
||||
ds = dl.load('...')
|
||||
ds = dl.load_from_file('/path/to/file')
|
||||
'''
|
||||
|
||||
@@ -21,7 +21,7 @@ __metaclass__ = type
|
||||
|
||||
from ansible.compat.six import iteritems, string_types
|
||||
|
||||
from ansible.errors import AnsibleParserError
|
||||
from ansible.errors import AnsibleParserError,AnsibleError
|
||||
from ansible.plugins import module_loader
|
||||
from ansible.parsing.splitter import parse_kv, split_args
|
||||
from ansible.template import Templar
|
||||
@@ -137,7 +137,16 @@ class ModuleArgsParser:
|
||||
# than those which may be parsed/normalized next
|
||||
final_args = dict()
|
||||
if additional_args:
|
||||
final_args.update(additional_args)
|
||||
if isinstance(additional_args, string_types):
|
||||
templar = Templar(loader=None)
|
||||
if templar._contains_vars(additional_args):
|
||||
final_args['_variable_params'] = additional_args
|
||||
else:
|
||||
raise AnsibleParserError("Complex args containing variables cannot use bare variables, and must use the full variable style ('{{var_name}}')")
|
||||
elif isinstance(additional_args, dict):
|
||||
final_args.update(additional_args)
|
||||
else:
|
||||
raise AnsibleParserError('Complex args must be a dictionary or variable string ("{{var}}").')
|
||||
|
||||
# how we normalize depends if we figured out what the module name is
|
||||
# yet. If we have already figured it out, it's an 'old style' invocation.
|
||||
@@ -155,6 +164,13 @@ class ModuleArgsParser:
|
||||
tmp_args = parse_kv(tmp_args)
|
||||
args.update(tmp_args)
|
||||
|
||||
# only internal variables can start with an underscore, so
|
||||
# we don't allow users to set them directy in arguments
|
||||
if args and action not in ('command', 'shell', 'script', 'raw'):
|
||||
for arg in args:
|
||||
if arg.startswith('_ansible_'):
|
||||
raise AnsibleError("invalid parameter specified for action '%s': '%s'" % (action, arg))
|
||||
|
||||
# finally, update the args we're going to return with the ones
|
||||
# which were normalized above
|
||||
if args:
|
||||
@@ -206,18 +222,21 @@ class ModuleArgsParser:
|
||||
action = None
|
||||
args = None
|
||||
|
||||
actions_allowing_raw = ('command', 'shell', 'script', 'raw')
|
||||
if isinstance(thing, dict):
|
||||
# form is like: copy: { src: 'a', dest: 'b' } ... common for structured (aka "complex") args
|
||||
thing = thing.copy()
|
||||
if 'module' in thing:
|
||||
action = thing['module']
|
||||
action, module_args = self._split_module_string(thing['module'])
|
||||
args = thing.copy()
|
||||
check_raw = action in actions_allowing_raw
|
||||
args.update(parse_kv(module_args, check_raw=check_raw))
|
||||
del args['module']
|
||||
|
||||
elif isinstance(thing, string_types):
|
||||
# form is like: copy: src=a dest=b ... common shorthand throughout ansible
|
||||
(action, args) = self._split_module_string(thing)
|
||||
check_raw = action in ('command', 'shell', 'script', 'raw')
|
||||
check_raw = action in actions_allowing_raw
|
||||
args = parse_kv(args, check_raw=check_raw)
|
||||
|
||||
else:
|
||||
|
||||
@@ -65,8 +65,8 @@ def parse_kv(args, check_raw=False):
|
||||
raise
|
||||
|
||||
raw_params = []
|
||||
for x in vargs:
|
||||
x = _decode_escapes(x)
|
||||
for orig_x in vargs:
|
||||
x = _decode_escapes(orig_x)
|
||||
if "=" in x:
|
||||
pos = 0
|
||||
try:
|
||||
@@ -83,19 +83,14 @@ def parse_kv(args, check_raw=False):
|
||||
k = x[:pos]
|
||||
v = x[pos + 1:]
|
||||
|
||||
# only internal variables can start with an underscore, so
|
||||
# we don't allow users to set them directy in arguments
|
||||
if k.startswith('_'):
|
||||
raise AnsibleError("invalid parameter specified: '%s'" % k)
|
||||
|
||||
# FIXME: make the retrieval of this list of shell/command
|
||||
# options a function, so the list is centralized
|
||||
if check_raw and k not in ('creates', 'removes', 'chdir', 'executable', 'warn'):
|
||||
raw_params.append(x)
|
||||
raw_params.append(orig_x)
|
||||
else:
|
||||
options[k.strip()] = unquote(v.strip())
|
||||
else:
|
||||
raw_params.append(x)
|
||||
raw_params.append(orig_x)
|
||||
|
||||
# recombine the free-form params, if any were found, and assign
|
||||
# them to a special option for use later by the shell/command module
|
||||
|
||||
@@ -20,6 +20,7 @@ from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import re
|
||||
from ansible.errors import AnsibleParserError, AnsibleError
|
||||
|
||||
# Components that match a numeric or alphanumeric begin:end or begin:end:step
|
||||
# range expression inside square brackets.
|
||||
@@ -162,6 +163,7 @@ patterns = {
|
||||
$
|
||||
'''.format(label=label), re.X|re.I|re.UNICODE
|
||||
),
|
||||
|
||||
}
|
||||
|
||||
def parse_address(address, allow_ranges=False):
|
||||
@@ -183,8 +185,8 @@ def parse_address(address, allow_ranges=False):
|
||||
# First, we extract the port number if one is specified.
|
||||
|
||||
port = None
|
||||
for type in ['bracketed_hostport', 'hostport']:
|
||||
m = patterns[type].match(address)
|
||||
for matching in ['bracketed_hostport', 'hostport']:
|
||||
m = patterns[matching].match(address)
|
||||
if m:
|
||||
(address, port) = m.groups()
|
||||
port = int(port)
|
||||
@@ -194,22 +196,20 @@ def parse_address(address, allow_ranges=False):
|
||||
# numeric ranges, or a hostname with alphanumeric ranges.
|
||||
|
||||
host = None
|
||||
for type in ['ipv4', 'ipv6', 'hostname']:
|
||||
m = patterns[type].match(address)
|
||||
for matching in ['ipv4', 'ipv6', 'hostname']:
|
||||
m = patterns[matching].match(address)
|
||||
if m:
|
||||
host = address
|
||||
continue
|
||||
|
||||
# If it isn't any of the above, we don't understand it.
|
||||
|
||||
if not host:
|
||||
return (None, None)
|
||||
|
||||
# If we get to this point, we know that any included ranges are valid. If
|
||||
# the caller is prepared to handle them, all is well. Otherwise we treat
|
||||
# it as a parse failure.
|
||||
raise AnsibleError("Not a valid network hostname: %s" % address)
|
||||
|
||||
# If we get to this point, we know that any included ranges are valid.
|
||||
# If the caller is prepared to handle them, all is well.
|
||||
# Otherwise we treat it as a parse failure.
|
||||
if not allow_ranges and '[' in host:
|
||||
return (None, None)
|
||||
raise AnsibleParserError("Detected range in host but was asked to ignore ranges")
|
||||
|
||||
return (host, port)
|
||||
|
||||
@@ -22,6 +22,7 @@ import shlex
|
||||
import shutil
|
||||
import sys
|
||||
import tempfile
|
||||
import random
|
||||
from io import BytesIO
|
||||
from subprocess import call
|
||||
from ansible.errors import AnsibleError
|
||||
@@ -70,7 +71,7 @@ try:
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
from ansible.compat.six import PY3, byte2int
|
||||
from ansible.compat.six import PY3
|
||||
from ansible.utils.unicode import to_unicode, to_bytes
|
||||
|
||||
HAS_ANY_PBKDF2HMAC = HAS_PBKDF2 or HAS_PBKDF2HMAC
|
||||
@@ -220,21 +221,91 @@ class VaultEditor:
|
||||
def __init__(self, password):
|
||||
self.vault = VaultLib(password)
|
||||
|
||||
def _shred_file_custom(self, tmp_path):
|
||||
""""Destroy a file, when shred (core-utils) is not available
|
||||
|
||||
Unix `shred' destroys files "so that they can be recovered only with great difficulty with
|
||||
specialised hardware, if at all". It is based on the method from the paper
|
||||
"Secure Deletion of Data from Magnetic and Solid-State Memory",
|
||||
Proceedings of the Sixth USENIX Security Symposium (San Jose, California, July 22-25, 1996).
|
||||
|
||||
We do not go to that length to re-implement shred in Python; instead, overwriting with a block
|
||||
of random data should suffice.
|
||||
|
||||
See https://github.com/ansible/ansible/pull/13700 .
|
||||
"""
|
||||
|
||||
file_len = os.path.getsize(tmp_path)
|
||||
|
||||
if file_len > 0: # avoid work when file was empty
|
||||
max_chunk_len = min(1024*1024*2, file_len)
|
||||
|
||||
passes = 3
|
||||
with open(tmp_path, "wb") as fh:
|
||||
for _ in range(passes):
|
||||
fh.seek(0, 0)
|
||||
# get a random chunk of data, each pass with other length
|
||||
chunk_len = random.randint(max_chunk_len//2, max_chunk_len)
|
||||
data = os.urandom(chunk_len)
|
||||
|
||||
for _ in range(0, file_len // chunk_len):
|
||||
fh.write(data)
|
||||
fh.write(data[:file_len % chunk_len])
|
||||
|
||||
assert(fh.tell() == file_len) # FIXME remove this assert once we have unittests to check its accuracy
|
||||
os.fsync(fh)
|
||||
|
||||
|
||||
def _shred_file(self, tmp_path):
|
||||
"""Securely destroy a decrypted file
|
||||
|
||||
Note standard limitations of GNU shred apply (For flash, overwriting would have no effect
|
||||
due to wear leveling; for other storage systems, the async kernel->filesystem->disk calls never
|
||||
guarantee data hits the disk; etc). Furthermore, if your tmp dirs is on tmpfs (ramdisks),
|
||||
it is a non-issue.
|
||||
|
||||
Nevertheless, some form of overwriting the data (instead of just removing the fs index entry) is
|
||||
a good idea. If shred is not available (e.g. on windows, or no core-utils installed), fall back on
|
||||
a custom shredding method.
|
||||
"""
|
||||
|
||||
if not os.path.isfile(tmp_path):
|
||||
# file is already gone
|
||||
return
|
||||
|
||||
try:
|
||||
r = call(['shred', tmp_path])
|
||||
except OSError:
|
||||
# shred is not available on this system, or some other error occured.
|
||||
r = 1
|
||||
|
||||
if r != 0:
|
||||
# we could not successfully execute unix shred; therefore, do custom shred.
|
||||
self._shred_file_custom(tmp_path)
|
||||
|
||||
os.remove(tmp_path)
|
||||
|
||||
def _edit_file_helper(self, filename, existing_data=None, force_save=False):
|
||||
|
||||
# Create a tempfile
|
||||
_, tmp_path = tempfile.mkstemp()
|
||||
|
||||
if existing_data:
|
||||
self.write_data(existing_data, tmp_path)
|
||||
self.write_data(existing_data, tmp_path, shred=False)
|
||||
|
||||
# drop the user into an editor on the tmp file
|
||||
call(self._editor_shell_command(tmp_path))
|
||||
try:
|
||||
call(self._editor_shell_command(tmp_path))
|
||||
except:
|
||||
# whatever happens, destroy the decrypted file
|
||||
self._shred_file(tmp_path)
|
||||
raise
|
||||
|
||||
tmpdata = self.read_data(tmp_path)
|
||||
|
||||
# Do nothing if the content has not changed
|
||||
if existing_data == tmpdata and not force_save:
|
||||
os.remove(tmp_path)
|
||||
self._shred_file(tmp_path)
|
||||
return
|
||||
|
||||
# encrypt new data and write out to tmp
|
||||
@@ -258,7 +329,7 @@ class VaultEditor:
|
||||
|
||||
ciphertext = self.read_data(filename)
|
||||
plaintext = self.vault.decrypt(ciphertext)
|
||||
self.write_data(plaintext, output_file or filename)
|
||||
self.write_data(plaintext, output_file or filename, shred=False)
|
||||
|
||||
def create_file(self, filename):
|
||||
""" create a new encrypted file """
|
||||
@@ -323,13 +394,21 @@ class VaultEditor:
|
||||
|
||||
return data
|
||||
|
||||
def write_data(self, data, filename):
|
||||
def write_data(self, data, filename, shred=True):
|
||||
"""write data to given path
|
||||
|
||||
if shred==True, make sure that the original data is first shredded so
|
||||
that is cannot be recovered
|
||||
"""
|
||||
bytes = to_bytes(data, errors='strict')
|
||||
if filename == '-':
|
||||
sys.stdout.write(bytes)
|
||||
else:
|
||||
if os.path.isfile(filename):
|
||||
os.remove(filename)
|
||||
if shred:
|
||||
self._shred_file(filename)
|
||||
else:
|
||||
os.remove(filename)
|
||||
with open(filename, "wb") as fh:
|
||||
fh.write(bytes)
|
||||
|
||||
@@ -338,6 +417,7 @@ class VaultEditor:
|
||||
# overwrite dest with src
|
||||
if os.path.isfile(dest):
|
||||
prev = os.stat(dest)
|
||||
# old file 'dest' was encrypted, no need to _shred_file
|
||||
os.remove(dest)
|
||||
shutil.move(src, dest)
|
||||
|
||||
|
||||
@@ -22,7 +22,7 @@ __metaclass__ = type
|
||||
import yaml
|
||||
from ansible.compat.six import PY3
|
||||
|
||||
from ansible.parsing.yaml.objects import AnsibleUnicode
|
||||
from ansible.parsing.yaml.objects import AnsibleUnicode, AnsibleSequence, AnsibleMapping
|
||||
from ansible.vars.hostvars import HostVars
|
||||
|
||||
class AnsibleDumper(yaml.SafeDumper):
|
||||
@@ -50,3 +50,13 @@ AnsibleDumper.add_representer(
|
||||
represent_hostvars,
|
||||
)
|
||||
|
||||
AnsibleDumper.add_representer(
|
||||
AnsibleSequence,
|
||||
yaml.representer.SafeRepresenter.represent_list,
|
||||
)
|
||||
|
||||
AnsibleDumper.add_representer(
|
||||
AnsibleMapping,
|
||||
yaml.representer.SafeRepresenter.represent_dict,
|
||||
)
|
||||
|
||||
|
||||
@@ -25,6 +25,7 @@ from ansible.errors import AnsibleParserError
|
||||
from ansible.playbook.play import Play
|
||||
from ansible.playbook.playbook_include import PlaybookInclude
|
||||
from ansible.plugins import get_all_plugin_loaders
|
||||
from ansible import constants as C
|
||||
|
||||
try:
|
||||
from __main__ import display
|
||||
@@ -44,6 +45,7 @@ class Playbook:
|
||||
self._entries = []
|
||||
self._basedir = os.getcwd()
|
||||
self._loader = loader
|
||||
self._file_name = None
|
||||
|
||||
@staticmethod
|
||||
def load(file_name, variable_manager=None, loader=None):
|
||||
@@ -61,6 +63,8 @@ class Playbook:
|
||||
# set the loaders basedir
|
||||
self._loader.set_basedir(self._basedir)
|
||||
|
||||
self._file_name = file_name
|
||||
|
||||
# dynamically load any plugins from the playbook directory
|
||||
for name, obj in get_all_plugin_loaders():
|
||||
if obj.subdir:
|
||||
@@ -84,7 +88,7 @@ class Playbook:
|
||||
if pb is not None:
|
||||
self._entries.extend(pb._entries)
|
||||
else:
|
||||
display.display("skipping playbook include '%s' due to conditional test failure" % entry.get('include', entry), color='cyan')
|
||||
display.display("skipping playbook include '%s' due to conditional test failure" % entry.get('include', entry), color=C.COLOR_SKIP)
|
||||
else:
|
||||
entry_obj = Play.load(entry, variable_manager=variable_manager, loader=self._loader)
|
||||
self._entries.append(entry_obj)
|
||||
|
||||
@@ -19,6 +19,7 @@
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
from copy import deepcopy
|
||||
|
||||
class Attribute:
|
||||
|
||||
@@ -32,6 +33,11 @@ class Attribute:
|
||||
self.priority = priority
|
||||
self.always_post_validate = always_post_validate
|
||||
|
||||
if default is not None and self.isa in ('list', 'dict', 'set'):
|
||||
self.default = deepcopy(default)
|
||||
else:
|
||||
self.default = default
|
||||
|
||||
def __eq__(self, other):
|
||||
return other.priority == self.priority
|
||||
|
||||
|
||||
@@ -27,7 +27,7 @@ import uuid
|
||||
from functools import partial
|
||||
from inspect import getmembers
|
||||
|
||||
from ansible.compat.six import iteritems, string_types, text_type
|
||||
from ansible.compat.six import iteritems, string_types
|
||||
|
||||
from jinja2.exceptions import UndefinedError
|
||||
|
||||
@@ -36,6 +36,7 @@ from ansible.parsing.dataloader import DataLoader
|
||||
from ansible.playbook.attribute import Attribute, FieldAttribute
|
||||
from ansible.utils.boolean import boolean
|
||||
from ansible.utils.vars import combine_vars, isidentifier
|
||||
from ansible.utils.unicode import to_unicode
|
||||
|
||||
BASE_ATTRIBUTES = {}
|
||||
|
||||
@@ -48,7 +49,7 @@ class Base:
|
||||
_remote_user = FieldAttribute(isa='string')
|
||||
|
||||
# variables
|
||||
_vars = FieldAttribute(isa='dict', default=dict(), priority=100)
|
||||
_vars = FieldAttribute(isa='dict', priority=100)
|
||||
|
||||
# flags and misc. settings
|
||||
_environment = FieldAttribute(isa='list')
|
||||
@@ -76,6 +77,10 @@ class Base:
|
||||
# and initialize the base attributes
|
||||
self._initialize_base_attributes()
|
||||
|
||||
# and init vars, avoid using defaults in field declaration as it lives across plays
|
||||
self.vars = dict()
|
||||
|
||||
|
||||
# The following three functions are used to programatically define data
|
||||
# descriptors (aka properties) for the Attributes of all of the playbook
|
||||
# objects (tasks, blocks, plays, etc).
|
||||
@@ -148,7 +153,7 @@ class Base:
|
||||
setattr(Base, name, property(getter, setter, deleter))
|
||||
|
||||
# Place the value into the instance so that the property can
|
||||
# process and hold that value/
|
||||
# process and hold that value.
|
||||
setattr(self, name, value.default)
|
||||
|
||||
def preprocess_data(self, ds):
|
||||
@@ -310,7 +315,7 @@ class Base:
|
||||
# and make sure the attribute is of the type it should be
|
||||
if value is not None:
|
||||
if attribute.isa == 'string':
|
||||
value = text_type(value)
|
||||
value = to_unicode(value)
|
||||
elif attribute.isa == 'int':
|
||||
value = int(value)
|
||||
elif attribute.isa == 'float':
|
||||
|
||||
@@ -90,16 +90,18 @@ class Become:
|
||||
|
||||
display.deprecated("Instead of su/su_user, use become/become_user and set become_method to 'su' (default is sudo)")
|
||||
|
||||
# if we are becoming someone else, but some fields are unset,
|
||||
# make sure they're initialized to the default config values
|
||||
if ds.get('become', False):
|
||||
if ds.get('become_method', None) is None:
|
||||
ds['become_method'] = C.DEFAULT_BECOME_METHOD
|
||||
if ds.get('become_user', None) is None:
|
||||
ds['become_user'] = C.DEFAULT_BECOME_USER
|
||||
|
||||
return ds
|
||||
|
||||
def set_become_defaults(self, become, become_method, become_user):
|
||||
''' if we are becoming someone else, but some fields are unset,
|
||||
make sure they're initialized to the default config values '''
|
||||
if become:
|
||||
if become_method is None:
|
||||
become_method = C.DEFAULT_BECOME_METHOD
|
||||
if become_user is None:
|
||||
become_user = C.DEFAULT_BECOME_USER
|
||||
|
||||
def _get_attr_become(self):
|
||||
'''
|
||||
Override for the 'become' getattr fetcher, used from Base.
|
||||
|
||||
@@ -34,6 +34,8 @@ class Block(Base, Become, Conditional, Taggable):
|
||||
_rescue = FieldAttribute(isa='list', default=[])
|
||||
_always = FieldAttribute(isa='list', default=[])
|
||||
_delegate_to = FieldAttribute(isa='list')
|
||||
_delegate_facts = FieldAttribute(isa='bool', default=False)
|
||||
_any_errors_fatal = FieldAttribute(isa='bool')
|
||||
|
||||
# for future consideration? this would be functionally
|
||||
# similar to the 'else' clause for exceptions
|
||||
@@ -42,11 +44,20 @@ class Block(Base, Become, Conditional, Taggable):
|
||||
def __init__(self, play=None, parent_block=None, role=None, task_include=None, use_handlers=False, implicit=False):
|
||||
self._play = play
|
||||
self._role = role
|
||||
self._task_include = task_include
|
||||
self._parent_block = parent_block
|
||||
self._task_include = None
|
||||
self._parent_block = None
|
||||
self._use_handlers = use_handlers
|
||||
self._implicit = implicit
|
||||
self._dep_chain = []
|
||||
|
||||
if task_include:
|
||||
self._task_include = task_include
|
||||
elif parent_block:
|
||||
self._parent_block = parent_block
|
||||
|
||||
if parent_block:
|
||||
self._dep_chain = parent_block._dep_chain[:]
|
||||
else:
|
||||
self._dep_chain = []
|
||||
|
||||
super(Block, self).__init__()
|
||||
|
||||
@@ -329,6 +340,16 @@ class Block(Base, Become, Conditional, Taggable):
|
||||
|
||||
return environment
|
||||
|
||||
def _get_attr_any_errors_fatal(self):
|
||||
'''
|
||||
Override for the 'tags' getattr fetcher, used from Base.
|
||||
'''
|
||||
any_errors_fatal = self._attributes['any_errors_fatal']
|
||||
if hasattr(self, '_get_parent_attribute'):
|
||||
if self._get_parent_attribute('any_errors_fatal'):
|
||||
any_errors_fatal = True
|
||||
return any_errors_fatal
|
||||
|
||||
def filter_tagged_tasks(self, play_context, all_vars):
|
||||
'''
|
||||
Creates a new block, with task lists filtered based on the tags contained
|
||||
@@ -340,7 +361,9 @@ class Block(Base, Become, Conditional, Taggable):
|
||||
for task in target:
|
||||
if isinstance(task, Block):
|
||||
tmp_list.append(evaluate_block(task))
|
||||
elif task.action in ('meta', 'include') or task.evaluate_tags(play_context.only_tags, play_context.skip_tags, all_vars=all_vars):
|
||||
elif task.action == 'meta' \
|
||||
or (task.action == 'include' and task.evaluate_tags([], play_context.skip_tags, all_vars=all_vars)) \
|
||||
or task.evaluate_tags(play_context.only_tags, play_context.skip_tags, all_vars=all_vars):
|
||||
tmp_list.append(task)
|
||||
return tmp_list
|
||||
|
||||
@@ -355,3 +378,4 @@ class Block(Base, Become, Conditional, Taggable):
|
||||
|
||||
def has_tasks(self):
|
||||
return len(self.block) > 0 or len(self.rescue) > 0 or len(self.always) > 0
|
||||
|
||||
|
||||
@@ -22,7 +22,7 @@ __metaclass__ = type
|
||||
from jinja2.exceptions import UndefinedError
|
||||
|
||||
from ansible.compat.six import text_type
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.errors import AnsibleError, AnsibleUndefinedVariable
|
||||
from ansible.playbook.attribute import FieldAttribute
|
||||
from ansible.template import Templar
|
||||
|
||||
@@ -89,16 +89,22 @@ class Conditional:
|
||||
# make sure the templar is using the variables specifed to this method
|
||||
templar.set_available_variables(variables=all_vars)
|
||||
|
||||
conditional = templar.template(conditional)
|
||||
if not isinstance(conditional, basestring) or conditional == "":
|
||||
return conditional
|
||||
try:
|
||||
conditional = templar.template(conditional)
|
||||
if not isinstance(conditional, text_type) or conditional == "":
|
||||
return conditional
|
||||
|
||||
# a Jinja2 evaluation that results in something Python can eval!
|
||||
presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional
|
||||
conditional = templar.template(presented, fail_on_undefined=False)
|
||||
|
||||
val = conditional.strip()
|
||||
if val == presented:
|
||||
# a Jinja2 evaluation that results in something Python can eval!
|
||||
presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional
|
||||
conditional = templar.template(presented)
|
||||
val = conditional.strip()
|
||||
if val == "True":
|
||||
return True
|
||||
elif val == "False":
|
||||
return False
|
||||
else:
|
||||
raise AnsibleError("unable to evaluate conditional: %s" % original)
|
||||
except (AnsibleUndefinedVariable, UndefinedError) as e:
|
||||
# the templating failed, meaning most likely a
|
||||
# variable was undefined. If we happened to be
|
||||
# looking for an undefined variable, return True,
|
||||
@@ -108,11 +114,5 @@ class Conditional:
|
||||
elif "is defined" in original:
|
||||
return False
|
||||
else:
|
||||
raise AnsibleError("error while evaluating conditional: %s (%s)" % (original, presented))
|
||||
elif val == "True":
|
||||
return True
|
||||
elif val == "False":
|
||||
return False
|
||||
else:
|
||||
raise AnsibleError("unable to evaluate conditional: %s" % original)
|
||||
raise AnsibleError("error while evaluating conditional (%s): %s" % (original, e))
|
||||
|
||||
|
||||
@@ -24,6 +24,12 @@ import os
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.template import Templar
|
||||
|
||||
try:
|
||||
from __main__ import display
|
||||
except ImportError:
|
||||
from ansible.utils.display import Display
|
||||
display = Display()
|
||||
|
||||
class IncludedFile:
|
||||
|
||||
def __init__(self, filename, args, task):
|
||||
@@ -43,9 +49,15 @@ class IncludedFile:
|
||||
return "%s (%s): %s" % (self._filename, self._args, self._hosts)
|
||||
|
||||
@staticmethod
|
||||
def process_include_results(results, tqm, iterator, loader, variable_manager):
|
||||
def process_include_results(results, tqm, iterator, inventory, loader, variable_manager):
|
||||
included_files = []
|
||||
|
||||
def get_original_host(host):
|
||||
if host.name in inventory._hosts_cache:
|
||||
return inventory._hosts_cache[host.name]
|
||||
else:
|
||||
return inventory.get_host(host.name)
|
||||
|
||||
for res in results:
|
||||
|
||||
if res._task.action == 'include':
|
||||
@@ -61,9 +73,10 @@ class IncludedFile:
|
||||
if 'skipped' in include_result and include_result['skipped'] or 'failed' in include_result:
|
||||
continue
|
||||
|
||||
original_task = iterator.get_original_task(res._host, res._task)
|
||||
original_host = get_original_host(res._host)
|
||||
original_task = iterator.get_original_task(original_host, res._task)
|
||||
|
||||
task_vars = variable_manager.get_vars(loader=loader, play=iterator._play, host=res._host, task=original_task)
|
||||
task_vars = variable_manager.get_vars(loader=loader, play=iterator._play, host=original_host, task=original_task)
|
||||
templar = Templar(loader=loader, variables=task_vars)
|
||||
|
||||
include_variables = include_result.get('include_variables', dict())
|
||||
@@ -75,14 +88,19 @@ class IncludedFile:
|
||||
# handle relative includes by walking up the list of parent include
|
||||
# tasks and checking the relative result to see if it exists
|
||||
parent_include = original_task._task_include
|
||||
cumulative_path = None
|
||||
while parent_include is not None:
|
||||
parent_include_dir = templar.template(os.path.dirname(parent_include.args.get('_raw_params')))
|
||||
if cumulative_path is None:
|
||||
cumulative_path = parent_include_dir
|
||||
elif not os.path.isabs(cumulative_path):
|
||||
cumulative_path = os.path.join(parent_include_dir, cumulative_path)
|
||||
include_target = templar.template(include_result['include'])
|
||||
if original_task._role:
|
||||
new_basedir = os.path.join(original_task._role._role_path, 'tasks', parent_include_dir)
|
||||
new_basedir = os.path.join(original_task._role._role_path, 'tasks', cumulative_path)
|
||||
include_file = loader.path_dwim_relative(new_basedir, 'tasks', include_target)
|
||||
else:
|
||||
include_file = loader.path_dwim_relative(loader.get_basedir(), parent_include_dir, include_target)
|
||||
include_file = loader.path_dwim_relative(loader.get_basedir(), cumulative_path, include_target)
|
||||
|
||||
if os.path.exists(include_file):
|
||||
break
|
||||
@@ -105,6 +123,6 @@ class IncludedFile:
|
||||
except ValueError:
|
||||
included_files.append(inc_file)
|
||||
|
||||
inc_file.add_host(res._host)
|
||||
inc_file.add_host(original_host)
|
||||
|
||||
return included_files
|
||||
|
||||
@@ -64,7 +64,7 @@ class Play(Base, Taggable, Become):
|
||||
|
||||
# Connection
|
||||
_gather_facts = FieldAttribute(isa='bool', default=None, always_post_validate=True)
|
||||
_hosts = FieldAttribute(isa='list', default=[], required=True, listof=string_types, always_post_validate=True)
|
||||
_hosts = FieldAttribute(isa='list', required=True, listof=string_types, always_post_validate=True)
|
||||
_name = FieldAttribute(isa='string', default='', always_post_validate=True)
|
||||
|
||||
# Variable Attributes
|
||||
|
||||
@@ -125,6 +125,18 @@ TASK_ATTRIBUTE_OVERRIDES = (
|
||||
'remote_user',
|
||||
)
|
||||
|
||||
RESET_VARS = (
|
||||
'ansible_connection',
|
||||
'ansible_ssh_host',
|
||||
'ansible_ssh_pass',
|
||||
'ansible_ssh_port',
|
||||
'ansible_ssh_user',
|
||||
'ansible_ssh_private_key_file',
|
||||
'ansible_ssh_pipelining',
|
||||
'ansible_user',
|
||||
'ansible_host',
|
||||
'ansible_port',
|
||||
)
|
||||
|
||||
class PlayContext(Base):
|
||||
|
||||
@@ -316,6 +328,13 @@ class PlayContext(Base):
|
||||
# the host name in the delegated variable dictionary here
|
||||
delegated_host_name = templar.template(task.delegate_to)
|
||||
delegated_vars = variables.get('ansible_delegated_vars', dict()).get(delegated_host_name, dict())
|
||||
|
||||
delegated_transport = C.DEFAULT_TRANSPORT
|
||||
for transport_var in MAGIC_VARIABLE_MAPPING.get('connection'):
|
||||
if transport_var in delegated_vars:
|
||||
delegated_transport = delegated_vars[transport_var]
|
||||
break
|
||||
|
||||
# make sure this delegated_to host has something set for its remote
|
||||
# address, otherwise we default to connecting to it by name. This
|
||||
# may happen when users put an IP entry into their inventory, or if
|
||||
@@ -326,15 +345,38 @@ class PlayContext(Base):
|
||||
else:
|
||||
display.debug("no remote address found for delegated host %s\nusing its name, so success depends on DNS resolution" % delegated_host_name)
|
||||
delegated_vars['ansible_host'] = delegated_host_name
|
||||
|
||||
# reset the port back to the default if none was specified, to prevent
|
||||
# the delegated host from inheriting the original host's setting
|
||||
for port_var in MAGIC_VARIABLE_MAPPING.get('port'):
|
||||
if port_var in delegated_vars:
|
||||
break
|
||||
else:
|
||||
if delegated_transport == 'winrm':
|
||||
delegated_vars['ansible_port'] = 5986
|
||||
else:
|
||||
delegated_vars['ansible_port'] = C.DEFAULT_REMOTE_PORT
|
||||
|
||||
# and likewise for the remote user
|
||||
for user_var in MAGIC_VARIABLE_MAPPING.get('remote_user'):
|
||||
if user_var in delegated_vars:
|
||||
break
|
||||
else:
|
||||
delegated_vars['ansible_user'] = task.remote_user or self.remote_user
|
||||
else:
|
||||
delegated_vars = dict()
|
||||
|
||||
attrs_considered = []
|
||||
for (attr, variable_names) in iteritems(MAGIC_VARIABLE_MAPPING):
|
||||
for variable_name in variable_names:
|
||||
if attr in attrs_considered:
|
||||
continue
|
||||
if isinstance(delegated_vars, dict) and variable_name in delegated_vars:
|
||||
setattr(new_info, attr, delegated_vars[variable_name])
|
||||
attrs_considered.append(attr)
|
||||
elif variable_name in variables:
|
||||
setattr(new_info, attr, variables[variable_name])
|
||||
attrs_considered.append(attr)
|
||||
|
||||
# make sure we get port defaults if needed
|
||||
if new_info.port is None and C.DEFAULT_REMOTE_PORT is not None:
|
||||
@@ -367,6 +409,13 @@ class PlayContext(Base):
|
||||
if new_info.no_log is None:
|
||||
new_info.no_log = C.DEFAULT_NO_LOG
|
||||
|
||||
# set become defaults if not previouslly set
|
||||
task.set_become_defaults(new_info.become, new_info.become_method, new_info.become_user)
|
||||
|
||||
# have always_run override check mode
|
||||
if task.always_run:
|
||||
new_info.check_mode = False
|
||||
|
||||
return new_info
|
||||
|
||||
def make_become_cmd(self, cmd, executable=None):
|
||||
@@ -473,7 +522,8 @@ class PlayContext(Base):
|
||||
|
||||
# TODO: should we be setting the more generic values here rather than
|
||||
# the more specific _ssh_ ones?
|
||||
for special_var in ['ansible_connection', 'ansible_ssh_host', 'ansible_ssh_pass', 'ansible_ssh_port', 'ansible_ssh_user', 'ansible_ssh_private_key_file', 'ansible_ssh_pipelining']:
|
||||
for special_var in RESET_VARS:
|
||||
|
||||
if special_var not in variables:
|
||||
for prop, varnames in MAGIC_VARIABLE_MAPPING.items():
|
||||
if special_var in varnames:
|
||||
|
||||
@@ -22,7 +22,7 @@ __metaclass__ = type
|
||||
import os
|
||||
|
||||
from ansible.compat.six import iteritems
|
||||
from ansible.errors import AnsibleParserError
|
||||
from ansible.errors import AnsibleParserError, AnsibleError
|
||||
from ansible.parsing.splitter import split_args, parse_kv
|
||||
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping
|
||||
from ansible.playbook.attribute import FieldAttribute
|
||||
@@ -55,18 +55,25 @@ class PlaybookInclude(Base, Conditional, Taggable):
|
||||
# playbook objects
|
||||
new_obj = super(PlaybookInclude, self).load_data(ds, variable_manager, loader)
|
||||
|
||||
all_vars = dict()
|
||||
all_vars = self.vars.copy()
|
||||
if variable_manager:
|
||||
all_vars = variable_manager.get_vars(loader=loader)
|
||||
all_vars.update(variable_manager.get_vars(loader=loader))
|
||||
|
||||
templar = Templar(loader=loader, variables=all_vars)
|
||||
if not new_obj.evaluate_conditional(templar=templar, all_vars=all_vars):
|
||||
return None
|
||||
|
||||
try:
|
||||
forward_conditional = False
|
||||
if not new_obj.evaluate_conditional(templar=templar, all_vars=all_vars):
|
||||
return None
|
||||
except AnsibleError:
|
||||
# conditional evaluation raised an error, so we set a flag to indicate
|
||||
# we need to forward the conditionals on to the included play(s)
|
||||
forward_conditional = True
|
||||
|
||||
# then we use the object to load a Playbook
|
||||
pb = Playbook(loader=loader)
|
||||
|
||||
file_name = new_obj.include
|
||||
file_name = templar.template(new_obj.include)
|
||||
if not os.path.isabs(file_name):
|
||||
file_name = os.path.join(basedir, file_name)
|
||||
|
||||
@@ -85,6 +92,13 @@ class PlaybookInclude(Base, Conditional, Taggable):
|
||||
if entry._included_path is None:
|
||||
entry._included_path = os.path.dirname(file_name)
|
||||
|
||||
# Check to see if we need to forward the conditionals on to the included
|
||||
# plays. If so, we can take a shortcut here and simply prepend them to
|
||||
# those attached to each block (if any)
|
||||
if forward_conditional:
|
||||
for task_block in entry.tasks:
|
||||
task_block.when = self.when[:] + task_block.when
|
||||
|
||||
return pb
|
||||
|
||||
def preprocess_data(self, ds):
|
||||
|
||||
@@ -43,7 +43,10 @@ __all__ = ['Role', 'hash_params']
|
||||
# strategies (ansible/plugins/strategy/__init__.py)
|
||||
def hash_params(params):
|
||||
if not isinstance(params, dict):
|
||||
return params
|
||||
if isinstance(params, list):
|
||||
return frozenset(params)
|
||||
else:
|
||||
return params
|
||||
else:
|
||||
s = set()
|
||||
for k,v in iteritems(params):
|
||||
@@ -61,6 +64,7 @@ def hash_params(params):
|
||||
class Role(Base, Become, Conditional, Taggable):
|
||||
|
||||
_delegate_to = FieldAttribute(isa='string')
|
||||
_delegate_facts = FieldAttribute(isa='bool', default=False)
|
||||
|
||||
def __init__(self, play=None):
|
||||
self._role_name = None
|
||||
@@ -149,7 +153,7 @@ class Role(Base, Become, Conditional, Taggable):
|
||||
current_when = getattr(self, 'when')[:]
|
||||
current_when.extend(role_include.when)
|
||||
setattr(self, 'when', current_when)
|
||||
|
||||
|
||||
current_tags = getattr(self, 'tags')[:]
|
||||
current_tags.extend(role_include.tags)
|
||||
setattr(self, 'tags', current_tags)
|
||||
@@ -171,11 +175,17 @@ class Role(Base, Become, Conditional, Taggable):
|
||||
|
||||
task_data = self._load_role_yaml('tasks')
|
||||
if task_data:
|
||||
self._task_blocks = load_list_of_blocks(task_data, play=self._play, role=self, loader=self._loader)
|
||||
try:
|
||||
self._task_blocks = load_list_of_blocks(task_data, play=self._play, role=self, loader=self._loader)
|
||||
except AssertionError:
|
||||
raise AnsibleParserError("The tasks/main.yml file for role '%s' must contain a list of tasks" % self._role_name , obj=task_data)
|
||||
|
||||
handler_data = self._load_role_yaml('handlers')
|
||||
if handler_data:
|
||||
self._handler_blocks = load_list_of_blocks(handler_data, play=self._play, role=self, use_handlers=True, loader=self._loader)
|
||||
try:
|
||||
self._handler_blocks = load_list_of_blocks(handler_data, play=self._play, role=self, use_handlers=True, loader=self._loader)
|
||||
except:
|
||||
raise AnsibleParserError("The handlers/main.yml file for role '%s' must contain a list of tasks" % self._role_name , obj=task_data)
|
||||
|
||||
# vars and default vars are regular dictionaries
|
||||
self._role_vars = self._load_role_yaml('vars')
|
||||
@@ -258,6 +268,12 @@ class Role(Base, Become, Conditional, Taggable):
|
||||
inherited_vars = combine_vars(inherited_vars, parent._role_params)
|
||||
return inherited_vars
|
||||
|
||||
def get_role_params(self):
|
||||
params = {}
|
||||
for dep in self.get_all_dependencies():
|
||||
params = combine_vars(params, dep._role_params)
|
||||
return params
|
||||
|
||||
def get_vars(self, dep_chain=[], include_params=True):
|
||||
all_vars = self.get_inherited_vars(dep_chain, include_params=include_params)
|
||||
|
||||
@@ -307,7 +323,7 @@ class Role(Base, Become, Conditional, Taggable):
|
||||
|
||||
return host.name in self._completed and not self._metadata.allow_duplicates
|
||||
|
||||
def compile(self, play, dep_chain=[]):
|
||||
def compile(self, play, dep_chain=None):
|
||||
'''
|
||||
Returns the task list for this role, which is created by first
|
||||
recursively compiling the tasks for all direct dependencies, and
|
||||
@@ -321,18 +337,20 @@ class Role(Base, Become, Conditional, Taggable):
|
||||
block_list = []
|
||||
|
||||
# update the dependency chain here
|
||||
if dep_chain is None:
|
||||
dep_chain = []
|
||||
new_dep_chain = dep_chain + [self]
|
||||
|
||||
deps = self.get_direct_dependencies()
|
||||
for dep in deps:
|
||||
dep_blocks = dep.compile(play=play, dep_chain=new_dep_chain)
|
||||
for dep_block in dep_blocks:
|
||||
new_dep_block = dep_block.copy()
|
||||
new_dep_block._dep_chain = new_dep_chain
|
||||
new_dep_block._play = play
|
||||
block_list.append(new_dep_block)
|
||||
block_list.extend(dep_blocks)
|
||||
|
||||
block_list.extend(self._task_blocks)
|
||||
for task_block in self._task_blocks:
|
||||
new_task_block = task_block.copy()
|
||||
new_task_block._dep_chain = new_dep_chain
|
||||
new_task_block._play = play
|
||||
block_list.append(new_task_block)
|
||||
|
||||
return block_list
|
||||
|
||||
|
||||
@@ -135,46 +135,44 @@ class RoleDefinition(Base, Become, Conditional, Taggable):
|
||||
append it to the default role path
|
||||
'''
|
||||
|
||||
role_path = unfrackpath(role_name)
|
||||
# we always start the search for roles in the base directory of the playbook
|
||||
role_search_paths = [
|
||||
os.path.join(self._loader.get_basedir(), u'roles'),
|
||||
self._loader.get_basedir(),
|
||||
]
|
||||
|
||||
# also search in the configured roles path
|
||||
if C.DEFAULT_ROLES_PATH:
|
||||
configured_paths = C.DEFAULT_ROLES_PATH.split(os.pathsep)
|
||||
role_search_paths.extend(configured_paths)
|
||||
|
||||
# finally, append the roles basedir, if it was set, so we can
|
||||
# search relative to that directory for dependent roles
|
||||
if self._role_basedir:
|
||||
role_search_paths.append(self._role_basedir)
|
||||
|
||||
# create a templar class to template the dependency names, in
|
||||
# case they contain variables
|
||||
if self._variable_manager is not None:
|
||||
all_vars = self._variable_manager.get_vars(loader=self._loader, play=self._play)
|
||||
else:
|
||||
all_vars = dict()
|
||||
|
||||
templar = Templar(loader=self._loader, variables=all_vars)
|
||||
role_name = templar.template(role_name)
|
||||
|
||||
# now iterate through the possible paths and return the first one we find
|
||||
for path in role_search_paths:
|
||||
path = templar.template(path)
|
||||
role_path = unfrackpath(os.path.join(path, role_name))
|
||||
if self._loader.path_exists(role_path):
|
||||
return (role_name, role_path)
|
||||
|
||||
# if not found elsewhere try to extract path from name
|
||||
role_path = unfrackpath(role_name)
|
||||
if self._loader.path_exists(role_path):
|
||||
role_name = os.path.basename(role_name)
|
||||
return (role_name, role_path)
|
||||
else:
|
||||
# we always start the search for roles in the base directory of the playbook
|
||||
role_search_paths = [
|
||||
os.path.join(self._loader.get_basedir(), u'roles'),
|
||||
u'./roles',
|
||||
self._loader.get_basedir(),
|
||||
u'./'
|
||||
]
|
||||
|
||||
# also search in the configured roles path
|
||||
if C.DEFAULT_ROLES_PATH:
|
||||
configured_paths = C.DEFAULT_ROLES_PATH.split(os.pathsep)
|
||||
role_search_paths.extend(configured_paths)
|
||||
|
||||
# finally, append the roles basedir, if it was set, so we can
|
||||
# search relative to that directory for dependent roles
|
||||
if self._role_basedir:
|
||||
role_search_paths.append(self._role_basedir)
|
||||
|
||||
# create a templar class to template the dependency names, in
|
||||
# case they contain variables
|
||||
if self._variable_manager is not None:
|
||||
all_vars = self._variable_manager.get_vars(loader=self._loader, play=self._play)
|
||||
else:
|
||||
all_vars = dict()
|
||||
|
||||
templar = Templar(loader=self._loader, variables=all_vars)
|
||||
role_name = templar.template(role_name)
|
||||
|
||||
# now iterate through the possible paths and return the first one we find
|
||||
for path in role_search_paths:
|
||||
path = templar.template(path)
|
||||
role_path = unfrackpath(os.path.join(path, role_name))
|
||||
if self._loader.path_exists(role_path):
|
||||
return (role_name, role_path)
|
||||
|
||||
raise AnsibleError("the role '%s' was not found in %s" % (role_name, ":".join(role_search_paths)), obj=self._ds)
|
||||
|
||||
@@ -190,7 +188,12 @@ class RoleDefinition(Base, Become, Conditional, Taggable):
|
||||
for (key, value) in iteritems(ds):
|
||||
# use the list of FieldAttribute values to determine what is and is not
|
||||
# an extra parameter for this role (or sub-class of this role)
|
||||
if key not in base_attribute_names:
|
||||
# FIXME: hard-coded list of exception key names here corresponds to the
|
||||
# connection fields in the Base class. There may need to be some
|
||||
# other mechanism where we exclude certain kinds of field attributes,
|
||||
# or make this list more automatic in some way so we don't have to
|
||||
# remember to update it manually.
|
||||
if key not in base_attribute_names or key in ('connection', 'port', 'remote_user'):
|
||||
# this key does not match a field attribute, so it must be a role param
|
||||
role_params[key] = value
|
||||
else:
|
||||
|
||||
@@ -40,7 +40,8 @@ class RoleInclude(RoleDefinition):
|
||||
is included for execution in a play.
|
||||
"""
|
||||
|
||||
_delegate_to = FieldAttribute(isa='string')
|
||||
_delegate_to = FieldAttribute(isa='string')
|
||||
_delegate_facts = FieldAttribute(isa='bool', default=False)
|
||||
|
||||
def __init__(self, play=None, role_basedir=None, variable_manager=None, loader=None):
|
||||
super(RoleInclude, self).__init__(play=play, role_basedir=role_basedir, variable_manager=variable_manager, loader=loader)
|
||||
|
||||
@@ -38,7 +38,11 @@ class Taggable:
|
||||
if isinstance(ds, list):
|
||||
return ds
|
||||
elif isinstance(ds, basestring):
|
||||
return [ ds ]
|
||||
value = ds.split(',')
|
||||
if isinstance(value, list):
|
||||
return [ x.strip() for x in value ]
|
||||
else:
|
||||
return [ ds ]
|
||||
else:
|
||||
raise AnsibleError('tags must be specified as a list', obj=ds)
|
||||
|
||||
|
||||
@@ -72,6 +72,7 @@ class Task(Base, Conditional, Taggable, Become):
|
||||
_changed_when = FieldAttribute(isa='string')
|
||||
_delay = FieldAttribute(isa='int', default=5)
|
||||
_delegate_to = FieldAttribute(isa='string')
|
||||
_delegate_facts = FieldAttribute(isa='bool', default=False)
|
||||
_failed_when = FieldAttribute(isa='string')
|
||||
_first_available_file = FieldAttribute(isa='list')
|
||||
_loop = FieldAttribute(isa='string', private=True)
|
||||
@@ -81,7 +82,7 @@ class Task(Base, Conditional, Taggable, Become):
|
||||
_poll = FieldAttribute(isa='int')
|
||||
_register = FieldAttribute(isa='string')
|
||||
_retries = FieldAttribute(isa='int', default=3)
|
||||
_until = FieldAttribute(isa='list')
|
||||
_until = FieldAttribute(isa='string')
|
||||
|
||||
def __init__(self, block=None, role=None, task_include=None):
|
||||
''' constructors a task, without the Task.load classmethod, it will be pretty blank '''
|
||||
@@ -106,11 +107,10 @@ class Task(Base, Conditional, Taggable, Become):
|
||||
elif self.name:
|
||||
return self.name
|
||||
else:
|
||||
flattened_args = self._merge_kv(self.args)
|
||||
if self._role:
|
||||
return "%s : %s %s" % (self._role.get_name(), self.action, flattened_args)
|
||||
return "%s : %s" % (self._role.get_name(), self.action)
|
||||
else:
|
||||
return "%s %s" % (self.action, flattened_args)
|
||||
return "%s" % (self.action,)
|
||||
|
||||
def _merge_kv(self, ds):
|
||||
if ds is None:
|
||||
@@ -133,7 +133,10 @@ class Task(Base, Conditional, Taggable, Become):
|
||||
|
||||
def __repr__(self):
|
||||
''' returns a human readable representation of the task '''
|
||||
return "TASK: %s" % self.get_name()
|
||||
if self.get_name() == 'meta ':
|
||||
return "TASK: meta (%s)" % self.args['_raw_params']
|
||||
else:
|
||||
return "TASK: %s" % self.get_name()
|
||||
|
||||
def _preprocess_loop(self, ds, new_ds, k, v):
|
||||
''' take a lookup plugin name and store it correctly '''
|
||||
@@ -213,14 +216,6 @@ class Task(Base, Conditional, Taggable, Become):
|
||||
|
||||
return super(Task, self).preprocess_data(new_ds)
|
||||
|
||||
def _load_any_errors_fatal(self, attr, value):
|
||||
'''
|
||||
Exists only to show a deprecation warning, as this attribute is not valid
|
||||
at the task level.
|
||||
'''
|
||||
display.deprecated("Setting any_errors_fatal on a task is no longer supported. This should be set at the play level only")
|
||||
return None
|
||||
|
||||
def post_validate(self, templar):
|
||||
'''
|
||||
Override of base class post_validate, to also do final validation on
|
||||
@@ -256,6 +251,27 @@ class Task(Base, Conditional, Taggable, Become):
|
||||
break
|
||||
return templar.template(value, convert_bare=True)
|
||||
|
||||
def _post_validate_changed_when(self, attr, value, templar):
|
||||
'''
|
||||
changed_when is evaluated after the execution of the task is complete,
|
||||
and should not be templated during the regular post_validate step.
|
||||
'''
|
||||
return value
|
||||
|
||||
def _post_validate_failed_when(self, attr, value, templar):
|
||||
'''
|
||||
failed_when is evaluated after the execution of the task is complete,
|
||||
and should not be templated during the regular post_validate step.
|
||||
'''
|
||||
return value
|
||||
|
||||
def _post_validate_until(self, attr, value, templar):
|
||||
'''
|
||||
until is evaluated after the execution of the task is complete,
|
||||
and should not be templated during the regular post_validate step.
|
||||
'''
|
||||
return value
|
||||
|
||||
def get_vars(self):
|
||||
all_vars = dict()
|
||||
if self._block:
|
||||
@@ -272,6 +288,14 @@ class Task(Base, Conditional, Taggable, Become):
|
||||
|
||||
return all_vars
|
||||
|
||||
def get_include_params(self):
|
||||
all_vars = dict()
|
||||
if self._task_include:
|
||||
all_vars.update(self._task_include.get_include_params())
|
||||
if self.action == 'include':
|
||||
all_vars.update(self.vars)
|
||||
return all_vars
|
||||
|
||||
def copy(self, exclude_block=False):
|
||||
new_me = super(Task, self).copy()
|
||||
|
||||
@@ -390,3 +414,14 @@ class Task(Base, Conditional, Taggable, Become):
|
||||
if parent_environment is not None:
|
||||
environment = self._extend_value(environment, parent_environment)
|
||||
return environment
|
||||
|
||||
def _get_attr_any_errors_fatal(self):
|
||||
'''
|
||||
Override for the 'tags' getattr fetcher, used from Base.
|
||||
'''
|
||||
any_errors_fatal = self._attributes['any_errors_fatal']
|
||||
if hasattr(self, '_get_parent_attribute'):
|
||||
if self._get_parent_attribute('any_errors_fatal'):
|
||||
any_errors_fatal = True
|
||||
return any_errors_fatal
|
||||
|
||||
|
||||
@@ -213,15 +213,6 @@ class PluginLoader:
|
||||
def find_plugin(self, name, mod_type=''):
|
||||
''' Find a plugin named name '''
|
||||
|
||||
# The particular cache to look for modules within. This matches the
|
||||
# requested mod_type
|
||||
pull_cache = self._plugin_path_cache[mod_type]
|
||||
try:
|
||||
return pull_cache[name]
|
||||
except KeyError:
|
||||
# Cache miss. Now let's find the plugin
|
||||
pass
|
||||
|
||||
if mod_type:
|
||||
suffix = mod_type
|
||||
elif self.class_name:
|
||||
@@ -232,6 +223,15 @@ class PluginLoader:
|
||||
# they can have any suffix
|
||||
suffix = ''
|
||||
|
||||
# The particular cache to look for modules within. This matches the
|
||||
# requested mod_type
|
||||
pull_cache = self._plugin_path_cache[suffix]
|
||||
try:
|
||||
return pull_cache[name]
|
||||
except KeyError:
|
||||
# Cache miss. Now let's find the plugin
|
||||
pass
|
||||
|
||||
# TODO: Instead of using the self._paths cache (PATH_CACHE) and
|
||||
# self._searched_paths we could use an iterator. Before enabling that
|
||||
# we need to make sure we don't want to add additional directories
|
||||
|
||||
@@ -24,6 +24,7 @@ import json
|
||||
import os
|
||||
import pipes
|
||||
import random
|
||||
import re
|
||||
import stat
|
||||
import tempfile
|
||||
import time
|
||||
@@ -119,7 +120,7 @@ class ActionBase(with_metaclass(ABCMeta, object)):
|
||||
module_path = self._shared_loader_obj.module_loader.find_plugin(module_name, mod_type)
|
||||
if module_path:
|
||||
break
|
||||
else:
|
||||
else: # This is a for-else: http://bit.ly/1ElPkyg
|
||||
# Use Windows version of ping module to check module paths when
|
||||
# using a connection that supports .ps1 suffixes. We check specifically
|
||||
# for win_ping here, otherwise the code would look for ping.ps1
|
||||
@@ -151,15 +152,21 @@ class ActionBase(with_metaclass(ABCMeta, object)):
|
||||
if not isinstance(environments, list):
|
||||
environments = [ environments ]
|
||||
|
||||
# the environments as inherited need to be reversed, to make
|
||||
# sure we merge in the parent's values first so those in the
|
||||
# block then task 'win' in precedence
|
||||
environments.reverse()
|
||||
for environment in environments:
|
||||
if environment is None:
|
||||
continue
|
||||
if not isinstance(environment, dict):
|
||||
raise AnsibleError("environment must be a dictionary, received %s (%s)" % (environment, type(environment)))
|
||||
temp_environment = self._templar.template(environment)
|
||||
if not isinstance(temp_environment, dict):
|
||||
raise AnsibleError("environment must be a dictionary, received %s (%s)" % (temp_environment, type(temp_environment)))
|
||||
# very deliberately using update here instead of combine_vars, as
|
||||
# these environment settings should not need to merge sub-dicts
|
||||
final_environment.update(environment)
|
||||
final_environment.update(temp_environment)
|
||||
|
||||
final_environment = self._templar.template(final_environment)
|
||||
return self._connection._shell.env_prefix(**final_environment)
|
||||
|
||||
def _early_needs_tmp_path(self):
|
||||
@@ -201,9 +208,7 @@ class ActionBase(with_metaclass(ABCMeta, object)):
|
||||
tmp_mode = 0o755
|
||||
|
||||
cmd = self._connection._shell.mkdtemp(basefile, use_system_tmp, tmp_mode)
|
||||
display.debug("executing _low_level_execute_command to create the tmp path")
|
||||
result = self._low_level_execute_command(cmd, sudoable=False)
|
||||
display.debug("done with creation of tmp path")
|
||||
|
||||
# error handling on this seems a little aggressive?
|
||||
if result['rc'] != 0:
|
||||
@@ -228,7 +233,11 @@ class ActionBase(with_metaclass(ABCMeta, object)):
|
||||
output = output + u": %s" % result['stdout']
|
||||
raise AnsibleConnectionFailure(output)
|
||||
|
||||
rc = self._connection._shell.join_path(result['stdout'].strip(), u'').splitlines()[-1]
|
||||
try:
|
||||
rc = self._connection._shell.join_path(result['stdout'].strip(), u'').splitlines()[-1]
|
||||
except IndexError:
|
||||
# stdout was empty or just space, set to / to trigger error in next if
|
||||
rc = '/'
|
||||
|
||||
# Catch failure conditions, files should never be
|
||||
# written to locations in /.
|
||||
@@ -244,9 +253,7 @@ class ActionBase(with_metaclass(ABCMeta, object)):
|
||||
cmd = self._connection._shell.remove(tmp_path, recurse=True)
|
||||
# If we have gotten here we have a working ssh configuration.
|
||||
# If ssh breaks we could leave tmp directories out on the remote system.
|
||||
display.debug("calling _low_level_execute_command to remove the tmp path")
|
||||
self._low_level_execute_command(cmd, sudoable=False)
|
||||
display.debug("done removing the tmp path")
|
||||
|
||||
def _transfer_data(self, remote_path, data):
|
||||
'''
|
||||
@@ -281,9 +288,7 @@ class ActionBase(with_metaclass(ABCMeta, object)):
|
||||
'''
|
||||
|
||||
cmd = self._connection._shell.chmod(mode, path)
|
||||
display.debug("calling _low_level_execute_command to chmod the remote path")
|
||||
res = self._low_level_execute_command(cmd, sudoable=sudoable)
|
||||
display.debug("done with chmod call")
|
||||
return res
|
||||
|
||||
def _remote_checksum(self, path, all_vars):
|
||||
@@ -294,9 +299,7 @@ class ActionBase(with_metaclass(ABCMeta, object)):
|
||||
python_interp = all_vars.get('ansible_python_interpreter', 'python')
|
||||
|
||||
cmd = self._connection._shell.checksum(path, python_interp)
|
||||
display.debug("calling _low_level_execute_command to get the remote checksum")
|
||||
data = self._low_level_execute_command(cmd, sudoable=True)
|
||||
display.debug("done getting the remote checksum")
|
||||
try:
|
||||
data2 = data['stdout'].strip().splitlines()[-1]
|
||||
if data2 == u'':
|
||||
@@ -324,9 +327,7 @@ class ActionBase(with_metaclass(ABCMeta, object)):
|
||||
expand_path = '~%s' % self._play_context.become_user
|
||||
|
||||
cmd = self._connection._shell.expand_user(expand_path)
|
||||
display.debug("calling _low_level_execute_command to expand the remote user path")
|
||||
data = self._low_level_execute_command(cmd, sudoable=False)
|
||||
display.debug("done expanding the remote user path")
|
||||
#initial_fragment = utils.last_non_blank_line(data['stdout'])
|
||||
initial_fragment = data['stdout'].strip().splitlines()[-1]
|
||||
|
||||
@@ -356,6 +357,14 @@ class ActionBase(with_metaclass(ABCMeta, object)):
|
||||
|
||||
return data[idx:]
|
||||
|
||||
def _strip_success_message(self, data):
|
||||
'''
|
||||
Removes the BECOME-SUCCESS message from the data.
|
||||
'''
|
||||
if data.strip().startswith('BECOME-SUCCESS-'):
|
||||
data = re.sub(r'^((\r)?\n)?BECOME-SUCCESS.*(\r)?\n', '', data)
|
||||
return data
|
||||
|
||||
def _execute_module(self, module_name=None, module_args=None, tmp=None, task_vars=None, persist_files=False, delete_remote_tmp=True):
|
||||
'''
|
||||
Transfer and run a module along with its arguments.
|
||||
@@ -371,22 +380,28 @@ class ActionBase(with_metaclass(ABCMeta, object)):
|
||||
module_args = self._task.args
|
||||
|
||||
# set check mode in the module arguments, if required
|
||||
if self._play_context.check_mode and not self._task.always_run:
|
||||
if self._play_context.check_mode:
|
||||
if not self._supports_check_mode:
|
||||
raise AnsibleError("check mode is not supported for this operation")
|
||||
module_args['_ansible_check_mode'] = True
|
||||
else:
|
||||
module_args['_ansible_check_mode'] = False
|
||||
|
||||
# set no log in the module arguments, if required
|
||||
if self._play_context.no_log or not C.DEFAULT_NO_TARGET_SYSLOG:
|
||||
module_args['_ansible_no_log'] = True
|
||||
module_args['_ansible_no_log'] = self._play_context.no_log or C.DEFAULT_NO_TARGET_SYSLOG
|
||||
|
||||
# set debug in the module arguments, if required
|
||||
if C.DEFAULT_DEBUG:
|
||||
module_args['_ansible_debug'] = True
|
||||
module_args['_ansible_debug'] = C.DEFAULT_DEBUG
|
||||
|
||||
# let module know we are in diff mode
|
||||
module_args['_ansible_diff'] = self._play_context.diff
|
||||
|
||||
# let module know our verbosity
|
||||
module_args['_ansible_verbosity'] = self._display.verbosity
|
||||
|
||||
(module_style, shebang, module_data) = self._configure_module(module_name=module_name, module_args=module_args, task_vars=task_vars)
|
||||
if not shebang:
|
||||
raise AnsibleError("module is missing interpreter line")
|
||||
raise AnsibleError("module (%s) is missing interpreter line" % module_name)
|
||||
|
||||
# a remote tmp path may be necessary and not already created
|
||||
remote_module_path = None
|
||||
@@ -395,8 +410,9 @@ class ActionBase(with_metaclass(ABCMeta, object)):
|
||||
tmp = self._make_tmp_path()
|
||||
|
||||
if tmp:
|
||||
remote_module_path = self._connection._shell.join_path(tmp, module_name)
|
||||
if module_style == 'old':
|
||||
remote_module_filename = self._connection._shell.get_remote_filename(module_name)
|
||||
remote_module_path = self._connection._shell.join_path(tmp, remote_module_filename)
|
||||
if module_style in ['old', 'non_native_want_json']:
|
||||
# we'll also need a temp file to hold our module arguments
|
||||
args_file_path = self._connection._shell.join_path(tmp, 'args')
|
||||
|
||||
@@ -408,8 +424,10 @@ class ActionBase(with_metaclass(ABCMeta, object)):
|
||||
# the remote system, which can be read and parsed by the module
|
||||
args_data = ""
|
||||
for k,v in iteritems(module_args):
|
||||
args_data += '%s="%s" ' % (k, pipes.quote(v))
|
||||
args_data += '%s="%s" ' % (k, pipes.quote(text_type(v)))
|
||||
self._transfer_data(args_file_path, args_data)
|
||||
elif module_style == 'non_native_want_json':
|
||||
self._transfer_data(args_file_path, json.dumps(module_args))
|
||||
display.debug("done transferring module to remote")
|
||||
|
||||
environment_string = self._compute_environment_string()
|
||||
@@ -421,7 +439,7 @@ class ActionBase(with_metaclass(ABCMeta, object)):
|
||||
cmd = ""
|
||||
in_data = None
|
||||
|
||||
if self._connection.has_pipelining and self._play_context.pipelining and not C.DEFAULT_KEEP_REMOTE_FILES:
|
||||
if self._connection.has_pipelining and self._play_context.pipelining and not C.DEFAULT_KEEP_REMOTE_FILES and module_style == 'new':
|
||||
in_data = module_data
|
||||
else:
|
||||
if remote_module_path:
|
||||
@@ -442,9 +460,7 @@ class ActionBase(with_metaclass(ABCMeta, object)):
|
||||
# specified in the play, not the sudo_user
|
||||
sudoable = False
|
||||
|
||||
display.debug("calling _low_level_execute_command() for command %s" % cmd)
|
||||
res = self._low_level_execute_command(cmd, sudoable=sudoable, in_data=in_data)
|
||||
display.debug("_low_level_execute_command returned ok")
|
||||
|
||||
if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
|
||||
if self._play_context.become and self._play_context.become_user != 'root':
|
||||
@@ -461,9 +477,10 @@ class ActionBase(with_metaclass(ABCMeta, object)):
|
||||
if 'stderr' in res and res['stderr'].startswith(u'Traceback'):
|
||||
data['exception'] = res['stderr']
|
||||
else:
|
||||
data['msg'] = res.get('stdout', u'')
|
||||
data['msg'] = "MODULE FAILURE"
|
||||
data['module_stdout'] = res.get('stdout', u'')
|
||||
if 'stderr' in res:
|
||||
data['msg'] += res['stderr']
|
||||
data['module_stderr'] = res['stderr']
|
||||
|
||||
# pre-split stdout into lines, if stdout is in the data and there
|
||||
# isn't already a stdout_lines value there
|
||||
@@ -473,8 +490,7 @@ class ActionBase(with_metaclass(ABCMeta, object)):
|
||||
display.debug("done with _execute_module (%s, %s)" % (module_name, module_args))
|
||||
return data
|
||||
|
||||
def _low_level_execute_command(self, cmd, sudoable=True, in_data=None,
|
||||
executable=None, encoding_errors='replace'):
|
||||
def _low_level_execute_command(self, cmd, sudoable=True, in_data=None, executable=C.DEFAULT_EXECUTABLE, encoding_errors='replace'):
|
||||
'''
|
||||
This is the function which executes the low level shell command, which
|
||||
may be commands to create/remove directories for temporary files, or to
|
||||
@@ -490,23 +506,22 @@ class ActionBase(with_metaclass(ABCMeta, object)):
|
||||
'''
|
||||
|
||||
if executable is not None:
|
||||
cmd = executable + ' -c ' + cmd
|
||||
cmd = executable + ' -c ' + pipes.quote(cmd)
|
||||
|
||||
display.debug("in _low_level_execute_command() (%s)" % (cmd,))
|
||||
display.debug("_low_level_execute_command(): starting")
|
||||
if not cmd:
|
||||
# this can happen with powershell modules when there is no analog to a Windows command (like chmod)
|
||||
display.debug("no command, exiting _low_level_execute_command()")
|
||||
display.debug("_low_level_execute_command(): no command, exiting")
|
||||
return dict(stdout='', stderr='')
|
||||
|
||||
allow_same_user = C.BECOME_ALLOW_SAME_USER
|
||||
same_user = self._play_context.become_user == self._play_context.remote_user
|
||||
if sudoable and self._play_context.become and (allow_same_user or not same_user):
|
||||
display.debug("using become for this command")
|
||||
display.debug("_low_level_execute_command(): using become for this command")
|
||||
cmd = self._play_context.make_become_cmd(cmd, executable=executable)
|
||||
|
||||
display.debug("executing the command %s through the connection" % cmd)
|
||||
display.debug("_low_level_execute_command(): executing: %s" % (cmd,))
|
||||
rc, stdout, stderr = self._connection.exec_command(cmd, in_data=in_data, sudoable=sudoable)
|
||||
display.debug("command execution done")
|
||||
|
||||
# stdout and stderr may be either a file-like or a bytes object.
|
||||
# Convert either one to a text type
|
||||
@@ -524,10 +539,11 @@ class ActionBase(with_metaclass(ABCMeta, object)):
|
||||
else:
|
||||
err = stderr
|
||||
|
||||
display.debug("done with _low_level_execute_command() (%s)" % (cmd,))
|
||||
if rc is None:
|
||||
rc = 0
|
||||
|
||||
display.debug("_low_level_execute_command() done: rc=%d, stdout=%s, stderr=%s" % (rc, stdout, stderr))
|
||||
|
||||
return dict(rc=rc, stdout=out, stdout_lines=out.splitlines(), stderr=err)
|
||||
|
||||
def _get_first_available_file(self, faf, of=None, searchdir='files'):
|
||||
|
||||
@@ -53,9 +53,13 @@ class ActionModule(ActionBase):
|
||||
new_name = self._task.args.get('name', self._task.args.get('hostname', None))
|
||||
display.vv("creating host via 'add_host': hostname=%s" % new_name)
|
||||
|
||||
name, port = parse_address(new_name, allow_ranges=False)
|
||||
if not name:
|
||||
raise AnsibleError("Invalid inventory hostname: %s" % new_name)
|
||||
try:
|
||||
name, port = parse_address(new_name, allow_ranges=False)
|
||||
except:
|
||||
# not a parsable hostname, but might still be usable
|
||||
name = new_name
|
||||
port = None
|
||||
|
||||
if port:
|
||||
self._task.args['ansible_ssh_port'] = port
|
||||
|
||||
|
||||
@@ -48,7 +48,7 @@ class ActionModule(ActionBase):
|
||||
env_string = self._compute_environment_string()
|
||||
|
||||
module_args = self._task.args.copy()
|
||||
if self._play_context.no_log or not C.DEFAULT_NO_TARGET_SYSLOG:
|
||||
if self._play_context.no_log or C.DEFAULT_NO_TARGET_SYSLOG:
|
||||
module_args['_ansible_no_log'] = True
|
||||
|
||||
# configure, upload, and chmod the target module
|
||||
@@ -75,4 +75,8 @@ class ActionModule(ActionBase):
|
||||
|
||||
result['changed'] = True
|
||||
|
||||
# be sure to strip out the BECOME-SUCCESS message, which may
|
||||
# be there depending on the output of the module
|
||||
result['stdout'] = self._strip_success_message(result.get('stdout', ''))
|
||||
|
||||
return result
|
||||
|
||||
@@ -19,33 +19,46 @@ __metaclass__ = type
|
||||
|
||||
from ansible.plugins.action import ActionBase
|
||||
from ansible.utils.boolean import boolean
|
||||
|
||||
from ansible.utils.unicode import to_unicode
|
||||
from ansible.errors import AnsibleUndefinedVariable
|
||||
|
||||
class ActionModule(ActionBase):
|
||||
''' Print statements during execution '''
|
||||
|
||||
TRANSFERS_FILES = False
|
||||
VALID_ARGS = set(['msg', 'var'])
|
||||
|
||||
def run(self, tmp=None, task_vars=None):
|
||||
if task_vars is None:
|
||||
task_vars = dict()
|
||||
|
||||
for arg in self._task.args:
|
||||
if arg not in self.VALID_ARGS:
|
||||
return {"failed": True, "msg": "'%s' is not a valid option in debug" % arg}
|
||||
|
||||
if 'msg' in self._task.args and 'var' in self._task.args:
|
||||
return {"failed": True, "msg": "'msg' and 'var' are incompatible options"}
|
||||
|
||||
result = super(ActionModule, self).run(tmp, task_vars)
|
||||
|
||||
if 'msg' in self._task.args:
|
||||
if 'fail' in self._task.args and boolean(self._task.args['fail']):
|
||||
result['failed'] = True
|
||||
result['msg'] = self._task.args['msg']
|
||||
else:
|
||||
result['msg'] = self._task.args['msg']
|
||||
# FIXME: move the LOOKUP_REGEX somewhere else
|
||||
elif 'var' in self._task.args: # and not utils.LOOKUP_REGEX.search(self._task.args['var']):
|
||||
results = self._templar.template(self._task.args['var'], convert_bare=True)
|
||||
if results == self._task.args['var']:
|
||||
result['msg'] = self._task.args['msg']
|
||||
|
||||
elif 'var' in self._task.args:
|
||||
try:
|
||||
results = self._templar.template(self._task.args['var'], convert_bare=True, fail_on_undefined=True)
|
||||
if results == self._task.args['var']:
|
||||
raise AnsibleUndefinedVariable
|
||||
except AnsibleUndefinedVariable:
|
||||
results = "VARIABLE IS NOT DEFINED!"
|
||||
result[self._task.args['var']] = results
|
||||
|
||||
if type(self._task.args['var']) in (list, dict):
|
||||
# If var is a list or dict, use the type as key to display
|
||||
result[to_unicode(type(self._task.args['var']))] = results
|
||||
else:
|
||||
result[self._task.args['var']] = results
|
||||
else:
|
||||
result['msg'] = 'here we are'
|
||||
result['msg'] = 'Hello world!'
|
||||
|
||||
# force flag to make debug output module always verbose
|
||||
result['_ansible_verbose_always'] = True
|
||||
|
||||
@@ -60,20 +60,22 @@ class ActionModule(ActionBase):
|
||||
source = self._connection._shell.join_path(source)
|
||||
source = self._remote_expand_user(source)
|
||||
|
||||
# calculate checksum for the remote file
|
||||
remote_checksum = self._remote_checksum(source, all_vars=task_vars)
|
||||
remote_checksum = None
|
||||
if not self._play_context.become:
|
||||
# calculate checksum for the remote file, don't bother if using become as slurp will be used
|
||||
remote_checksum = self._remote_checksum(source, all_vars=task_vars)
|
||||
|
||||
# use slurp if sudo and permissions are lacking
|
||||
# use slurp if permissions are lacking or privilege escalation is needed
|
||||
remote_data = None
|
||||
if remote_checksum in ('1', '2') or self._play_context.become:
|
||||
if remote_checksum in ('1', '2', None):
|
||||
slurpres = self._execute_module(module_name='slurp', module_args=dict(src=source), task_vars=task_vars, tmp=tmp)
|
||||
if slurpres.get('failed'):
|
||||
if remote_checksum == '1' and not fail_on_missing:
|
||||
if not fail_on_missing and (slurpres.get('msg').startswith('file not found') or remote_checksum == '1'):
|
||||
result['msg'] = "the remote file does not exist, not transferring, ignored"
|
||||
result['file'] = source
|
||||
result['changed'] = False
|
||||
return result
|
||||
result.update(slurpres)
|
||||
else:
|
||||
result.update(slurpres)
|
||||
return result
|
||||
else:
|
||||
if slurpres['encoding'] == 'base64':
|
||||
@@ -115,8 +117,8 @@ class ActionModule(ActionBase):
|
||||
dest = dest.replace("//","/")
|
||||
|
||||
if remote_checksum in ('0', '1', '2', '3', '4'):
|
||||
# these don't fail because you may want to transfer a log file that possibly MAY exist
|
||||
# but keep going to fetch other log files
|
||||
# these don't fail because you may want to transfer a log file that
|
||||
# possibly MAY exist but keep going to fetch other log files
|
||||
if remote_checksum == '0':
|
||||
result['msg'] = "unable to calculate the checksum of the remote file"
|
||||
result['file'] = source
|
||||
@@ -162,25 +164,24 @@ class ActionModule(ActionBase):
|
||||
except (IOError, OSError) as e:
|
||||
raise AnsibleError("Failed to fetch the file: %s" % e)
|
||||
new_checksum = secure_hash(dest)
|
||||
# For backwards compatibility. We'll return None on FIPS enabled
|
||||
# systems
|
||||
# For backwards compatibility. We'll return None on FIPS enabled systems
|
||||
try:
|
||||
new_md5 = md5(dest)
|
||||
except ValueError:
|
||||
new_md5 = None
|
||||
|
||||
if validate_checksum and new_checksum != remote_checksum:
|
||||
result.update(dict(failed=True, md5sum=new_md5, msg="checksum mismatch", file=source, dest=dest, remote_md5sum=None, checksum=new_checksum, remote_checksum=remote_checksum))
|
||||
return result
|
||||
result.update(dict(changed=True, md5sum=new_md5, dest=dest, remote_md5sum=None, checksum=new_checksum, remote_checksum=remote_checksum))
|
||||
return result
|
||||
result.update(dict(failed=True, md5sum=new_md5,
|
||||
msg="checksum mismatch", file=source, dest=dest, remote_md5sum=None,
|
||||
checksum=new_checksum, remote_checksum=remote_checksum))
|
||||
else:
|
||||
result.update(dict(changed=True, md5sum=new_md5, dest=dest, remote_md5sum=None, checksum=new_checksum, remote_checksum=remote_checksum))
|
||||
else:
|
||||
# For backwards compatibility. We'll return None on FIPS enabled
|
||||
# systems
|
||||
# For backwards compatibility. We'll return None on FIPS enabled systems
|
||||
try:
|
||||
local_md5 = md5(dest)
|
||||
except ValueError:
|
||||
local_md5 = None
|
||||
|
||||
result.update(dict(changed=False, md5sum=local_md5, file=source, dest=dest, checksum=local_checksum))
|
||||
return result
|
||||
|
||||
return result
|
||||
|
||||
@@ -40,6 +40,6 @@ class ActionModule(ActionBase):
|
||||
group_name = self._task.args.get('key')
|
||||
group_name = group_name.replace(' ','-')
|
||||
|
||||
result['changed'] = True
|
||||
result['changed'] = False
|
||||
result['add_group'] = group_name
|
||||
return result
|
||||
|
||||
@@ -18,6 +18,7 @@ from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
from ansible.plugins.action import ActionBase
|
||||
from ansible.utils.vars import merge_hash
|
||||
|
||||
|
||||
class ActionModule(ActionBase):
|
||||
@@ -27,12 +28,16 @@ class ActionModule(ActionBase):
|
||||
task_vars = dict()
|
||||
|
||||
results = super(ActionModule, self).run(tmp, task_vars)
|
||||
results.update(self._execute_module(tmp=tmp, task_vars=task_vars))
|
||||
|
||||
# remove as modules might hide due to nolog
|
||||
del results['invocation']['module_args']
|
||||
results = merge_hash(results, self._execute_module(tmp=tmp, task_vars=task_vars))
|
||||
# Remove special fields from the result, which can only be set
|
||||
# internally by the executor engine. We do this only here in
|
||||
# the 'normal' action, as other action plugins may set this.
|
||||
for field in ('ansible_notify',):
|
||||
#
|
||||
# We don't want modules to determine that running the module fires
|
||||
# notify handlers. That's for the playbook to decide.
|
||||
for field in ('_ansible_notify',):
|
||||
if field in results:
|
||||
results.pop(field)
|
||||
|
||||
|
||||
@@ -105,6 +105,8 @@ class ActionModule(ActionBase):
|
||||
result['start'] = str(datetime.datetime.now())
|
||||
result['user_input'] = ''
|
||||
|
||||
fd = None
|
||||
old_settings = None
|
||||
try:
|
||||
if seconds is not None:
|
||||
# setup the alarm handler
|
||||
@@ -159,7 +161,7 @@ class ActionModule(ActionBase):
|
||||
finally:
|
||||
# cleanup and save some information
|
||||
# restore the old settings for the duped stdin fd
|
||||
if isatty(fd):
|
||||
if not(None in (fd, old_settings)) and isatty(fd):
|
||||
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
|
||||
|
||||
duration = time.time() - start
|
||||
|
||||
@@ -19,8 +19,6 @@ __metaclass__ = type
|
||||
|
||||
from ansible.plugins.action import ActionBase
|
||||
|
||||
import re
|
||||
|
||||
|
||||
class ActionModule(ActionBase):
|
||||
TRANSFERS_FILES = False
|
||||
@@ -42,7 +40,6 @@ class ActionModule(ActionBase):
|
||||
# for some modules (script, raw), the sudo success key
|
||||
# may leak into the stdout due to the way the sudo/su
|
||||
# command is constructed, so we filter that out here
|
||||
if result.get('stdout','').strip().startswith('BECOME-SUCCESS-'):
|
||||
result['stdout'] = re.sub(r'^((\r)?\n)?BECOME-SUCCESS.*(\r)?\n', '', result['stdout'])
|
||||
result['stdout'] = self._strip_success_message(result.get('stdout', ''))
|
||||
|
||||
return result
|
||||
|
||||
@@ -131,7 +131,10 @@ class ActionModule(ActionBase):
|
||||
src_host = '127.0.0.1'
|
||||
inventory_hostname = task_vars.get('inventory_hostname')
|
||||
dest_host_inventory_vars = task_vars['hostvars'].get(inventory_hostname)
|
||||
dest_host = dest_host_inventory_vars.get('ansible_ssh_host', inventory_hostname)
|
||||
try:
|
||||
dest_host = dest_host_inventory_vars['ansible_host']
|
||||
except KeyError:
|
||||
dest_host = dest_host_inventory_vars.get('ansible_ssh_host', inventory_hostname)
|
||||
|
||||
dest_is_local = dest_host in C.LOCALHOST
|
||||
|
||||
|
||||
@@ -63,8 +63,13 @@ class ActionModule(ActionBase):
|
||||
dest = self._task.args.get('dest', None)
|
||||
faf = self._task.first_available_file
|
||||
force = boolean(self._task.args.get('force', True))
|
||||
state = self._task.args.get('state', None)
|
||||
|
||||
if (source is None and faf is not None) or dest is None:
|
||||
if state is not None:
|
||||
result['failed'] = True
|
||||
result['msg'] = "'state' cannot be specified on a template"
|
||||
return result
|
||||
elif (source is None and faf is not None) or dest is None:
|
||||
result['failed'] = True
|
||||
result['msg'] = "src and dest are required"
|
||||
return result
|
||||
@@ -150,7 +155,7 @@ class ActionModule(ActionBase):
|
||||
diff = {}
|
||||
new_module_args = self._task.args.copy()
|
||||
|
||||
if force and local_checksum != remote_checksum:
|
||||
if (remote_checksum == '1') or (force and local_checksum != remote_checksum):
|
||||
|
||||
result['changed'] = True
|
||||
# if showing diffs, we need to get the remote value
|
||||
|
||||
@@ -69,14 +69,14 @@ class ActionModule(ActionBase):
|
||||
source = self._loader.path_dwim_relative(self._loader.get_basedir(), 'files', source)
|
||||
|
||||
remote_checksum = self._remote_checksum(dest, all_vars=task_vars)
|
||||
if remote_checksum != '3':
|
||||
result['failed'] = True
|
||||
result['msg'] = "dest '%s' must be an existing dir" % dest
|
||||
return result
|
||||
elif remote_checksum == '4':
|
||||
if remote_checksum == '4':
|
||||
result['failed'] = True
|
||||
result['msg'] = "python isn't present on the system. Unable to compute checksum"
|
||||
return result
|
||||
elif remote_checksum != '3':
|
||||
result['failed'] = True
|
||||
result['msg'] = "dest '%s' must be an existing dir" % dest
|
||||
return result
|
||||
|
||||
if copy:
|
||||
# transfer the file to a remote tmp location
|
||||
|
||||
@@ -59,6 +59,21 @@ class CallbackBase:
|
||||
version = getattr(self, 'CALLBACK_VERSION', '1.0')
|
||||
self._display.vvvv('Loaded callback %s of type %s, v%s' % (name, ctype, version))
|
||||
|
||||
''' helper for callbacks, so they don't all have to include deepcopy '''
|
||||
_copy_result = deepcopy
|
||||
|
||||
def _copy_result_exclude(self, result, exclude):
|
||||
values = []
|
||||
for e in exclude:
|
||||
values.append(getattr(result, e))
|
||||
setattr(result, e, None)
|
||||
|
||||
result_copy = deepcopy(result)
|
||||
for i,e in enumerate(exclude):
|
||||
setattr(result, e, values[i])
|
||||
|
||||
return result_copy
|
||||
|
||||
def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False):
|
||||
if result.get('_ansible_no_log', False):
|
||||
return json.dumps(dict(censored="the output has been hidden due to the fact that 'no_log: true' was specified for this result"))
|
||||
@@ -101,6 +116,10 @@ class CallbackBase:
|
||||
if 'src_larger' in diff:
|
||||
ret.append("diff skipped: source file size is greater than %d\n" % diff['src_larger'])
|
||||
if 'before' in diff and 'after' in diff:
|
||||
# format complex structures into 'files'
|
||||
for x in ['before', 'after']:
|
||||
if isinstance(diff[x], dict):
|
||||
diff[x] = json.dumps(diff[x], sort_keys=True, indent=4)
|
||||
if 'before_header' in diff:
|
||||
before_header = "before: %s" % diff['before_header']
|
||||
else:
|
||||
@@ -126,7 +145,7 @@ class CallbackBase:
|
||||
|
||||
def _process_items(self, result):
|
||||
for res in result._result['results']:
|
||||
newres = deepcopy(result)
|
||||
newres = self._copy_result_exclude(result, ['_result'])
|
||||
res['item'] = self._get_item(res)
|
||||
newres._result = res
|
||||
if 'failed' in res and res['failed']:
|
||||
@@ -136,6 +155,12 @@ class CallbackBase:
|
||||
else:
|
||||
self.v2_playbook_item_on_ok(newres)
|
||||
|
||||
def _clean_results(self, result, task_name):
|
||||
if 'changed' in result and task_name in ['debug']:
|
||||
del result['changed']
|
||||
if 'invocation' in result and task_name in ['debug']:
|
||||
del result['invocation']
|
||||
|
||||
def set_play_context(self, play_context):
|
||||
pass
|
||||
|
||||
@@ -246,7 +271,7 @@ class CallbackBase:
|
||||
def v2_runner_on_file_diff(self, result, diff):
|
||||
pass #no v1 correspondance
|
||||
|
||||
def v2_playbook_on_start(self):
|
||||
def v2_playbook_on_start(self, playbook):
|
||||
self.playbook_on_start()
|
||||
|
||||
def v2_playbook_on_notify(self, result, handler):
|
||||
@@ -304,3 +329,12 @@ class CallbackBase:
|
||||
|
||||
def v2_playbook_on_include(self, included_file):
|
||||
pass #no v1 correspondance
|
||||
|
||||
def v2_playbook_item_on_ok(self, result):
|
||||
pass
|
||||
|
||||
def v2_playbook_item_on_failed(self, result):
|
||||
pass
|
||||
|
||||
def v2_playbook_item_on_skipped(self, result):
|
||||
pass
|
||||
|
||||
@@ -21,6 +21,7 @@ __metaclass__ = type
|
||||
|
||||
from ansible import constants as C
|
||||
from ansible.plugins.callback import CallbackBase
|
||||
from ansible.utils.color import colorize, hostcolor
|
||||
|
||||
class CallbackModule(CallbackBase):
|
||||
|
||||
@@ -43,7 +44,7 @@ class CallbackModule(CallbackBase):
|
||||
else:
|
||||
msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception']
|
||||
|
||||
self._display.display(msg, color='red')
|
||||
self._display.display(msg, color=C.COLOR_ERROR)
|
||||
|
||||
# finally, remove the exception from the result so it's not shown every time
|
||||
del result._result['exception']
|
||||
@@ -52,15 +53,16 @@ class CallbackModule(CallbackBase):
|
||||
self._process_items(result)
|
||||
else:
|
||||
if delegated_vars:
|
||||
self._display.display("fatal: [%s -> %s]: FAILED! => %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color='red')
|
||||
self._display.display("fatal: [%s -> %s]: FAILED! => %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color=C.COLOR_ERROR)
|
||||
else:
|
||||
self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result)), color='red')
|
||||
self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result)), color=C.COLOR_ERROR)
|
||||
|
||||
if result._task.ignore_errors:
|
||||
self._display.display("...ignoring", color='cyan')
|
||||
self._display.display("...ignoring", color=C.COLOR_SKIP)
|
||||
|
||||
def v2_runner_on_ok(self, result):
|
||||
|
||||
self._clean_results(result._result, result._task.action)
|
||||
delegated_vars = result._result.get('_ansible_delegated_vars', None)
|
||||
if result._task.action == 'include':
|
||||
return
|
||||
@@ -69,13 +71,13 @@ class CallbackModule(CallbackBase):
|
||||
msg = "changed: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
|
||||
else:
|
||||
msg = "changed: [%s]" % result._host.get_name()
|
||||
color = 'yellow'
|
||||
color = C.COLOR_CHANGED
|
||||
else:
|
||||
if delegated_vars:
|
||||
msg = "ok: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
|
||||
else:
|
||||
msg = "ok: [%s]" % result._host.get_name()
|
||||
color = 'green'
|
||||
color = C.COLOR_OK
|
||||
|
||||
if result._task.loop and 'results' in result._result:
|
||||
self._process_items(result)
|
||||
@@ -95,17 +97,17 @@ class CallbackModule(CallbackBase):
|
||||
msg = "skipping: [%s]" % result._host.get_name()
|
||||
if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result:
|
||||
msg += " => %s" % self._dump_results(result._result)
|
||||
self._display.display(msg, color='cyan')
|
||||
self._display.display(msg, color=C.COLOR_SKIP)
|
||||
|
||||
def v2_runner_on_unreachable(self, result):
|
||||
delegated_vars = result._result.get('_ansible_delegated_vars', None)
|
||||
if delegated_vars:
|
||||
self._display.display("fatal: [%s -> %s]: UNREACHABLE! => %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color='red')
|
||||
self._display.display("fatal: [%s -> %s]: UNREACHABLE! => %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color=C.COLOR_ERROR)
|
||||
else:
|
||||
self._display.display("fatal: [%s]: UNREACHABLE! => %s" % (result._host.get_name(), self._dump_results(result._result)), color='red')
|
||||
self._display.display("fatal: [%s]: UNREACHABLE! => %s" % (result._host.get_name(), self._dump_results(result._result)), color=C.COLOR_ERROR)
|
||||
|
||||
def v2_playbook_on_no_hosts_matched(self):
|
||||
self._display.display("skipping: no hosts matched", color='cyan')
|
||||
self._display.display("skipping: no hosts matched", color=C.COLOR_SKIP)
|
||||
|
||||
def v2_playbook_on_no_hosts_remaining(self):
|
||||
self._display.banner("NO MORE HOSTS LEFT")
|
||||
@@ -115,7 +117,7 @@ class CallbackModule(CallbackBase):
|
||||
if self._display.verbosity > 2:
|
||||
path = task.get_path()
|
||||
if path:
|
||||
self._display.display("task path: %s" % path, color='dark gray')
|
||||
self._display.display("task path: %s" % path, color=C.COLOR_DEBUG)
|
||||
|
||||
def v2_playbook_on_cleanup_task_start(self, task):
|
||||
self._display.banner("CLEANUP TASK [%s]" % task.get_name().strip())
|
||||
@@ -133,7 +135,11 @@ class CallbackModule(CallbackBase):
|
||||
self._display.banner(msg)
|
||||
|
||||
def v2_on_file_diff(self, result):
|
||||
if 'diff' in result._result and result._result['diff']:
|
||||
if result._task.loop and 'results' in result._result:
|
||||
for res in result._result['results']:
|
||||
if 'diff' in res and res['diff']:
|
||||
self._display.display(self._get_diff(res['diff']))
|
||||
elif 'diff' in result._result and result._result['diff']:
|
||||
self._display.display(self._get_diff(result._result['diff']))
|
||||
|
||||
def v2_playbook_item_on_ok(self, result):
|
||||
@@ -146,13 +152,13 @@ class CallbackModule(CallbackBase):
|
||||
msg = "changed: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
|
||||
else:
|
||||
msg = "changed: [%s]" % result._host.get_name()
|
||||
color = 'yellow'
|
||||
color = C.COLOR_CHANGED
|
||||
else:
|
||||
if delegated_vars:
|
||||
msg = "ok: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
|
||||
else:
|
||||
msg = "ok: [%s]" % result._host.get_name()
|
||||
color = 'green'
|
||||
color = C.COLOR_OK
|
||||
|
||||
msg += " => (item=%s)" % (result._result['item'],)
|
||||
|
||||
@@ -170,15 +176,15 @@ class CallbackModule(CallbackBase):
|
||||
else:
|
||||
msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception']
|
||||
|
||||
self._display.display(msg, color='red')
|
||||
self._display.display(msg, color=C.COLOR_ERROR)
|
||||
|
||||
# finally, remove the exception from the result so it's not shown every time
|
||||
del result._result['exception']
|
||||
|
||||
if delegated_vars:
|
||||
self._display.display("failed: [%s -> %s] => (item=%s) => %s" % (result._host.get_name(), delegated_vars['ansible_host'], result._result['item'], self._dump_results(result._result)), color='red')
|
||||
self._display.display("failed: [%s -> %s] => (item=%s) => %s" % (result._host.get_name(), delegated_vars['ansible_host'], result._result['item'], self._dump_results(result._result)), color=C.COLOR_ERROR)
|
||||
else:
|
||||
self._display.display("failed: [%s] => (item=%s) => %s" % (result._host.get_name(), result._result['item'], self._dump_results(result._result)), color='red')
|
||||
self._display.display("failed: [%s] => (item=%s) => %s" % (result._host.get_name(), result._result['item'], self._dump_results(result._result)), color=C.COLOR_ERROR)
|
||||
|
||||
self._handle_warnings(result._result)
|
||||
|
||||
@@ -186,10 +192,37 @@ class CallbackModule(CallbackBase):
|
||||
msg = "skipping: [%s] => (item=%s) " % (result._host.get_name(), result._result['item'])
|
||||
if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result:
|
||||
msg += " => %s" % self._dump_results(result._result)
|
||||
self._display.display(msg, color='cyan')
|
||||
self._display.display(msg, color=C.COLOR_SKIP)
|
||||
|
||||
def v2_playbook_on_include(self, included_file):
|
||||
msg = 'included: %s for %s' % (included_file._filename, ", ".join([h.name for h in included_file._hosts]))
|
||||
color = 'cyan'
|
||||
self._display.display(msg, color='cyan')
|
||||
color = C.COLOR_SKIP
|
||||
self._display.display(msg, color=C.COLOR_SKIP)
|
||||
|
||||
def v2_playbook_on_stats(self, stats):
|
||||
self._display.banner("PLAY RECAP")
|
||||
|
||||
hosts = sorted(stats.processed.keys())
|
||||
for h in hosts:
|
||||
t = stats.summarize(h)
|
||||
|
||||
self._display.display(u"%s : %s %s %s %s" % (
|
||||
hostcolor(h, t),
|
||||
colorize(u'ok', t['ok'], C.COLOR_OK),
|
||||
colorize(u'changed', t['changed'], C.COLOR_CHANGED),
|
||||
colorize(u'unreachable', t['unreachable'], C.COLOR_UNREACHABLE),
|
||||
colorize(u'failed', t['failures'], C.COLOR_ERROR)),
|
||||
screen_only=True
|
||||
)
|
||||
|
||||
self._display.display(u"%s : %s %s %s %s" % (
|
||||
hostcolor(h, t, False),
|
||||
colorize(u'ok', t['ok'], None),
|
||||
colorize(u'changed', t['changed'], None),
|
||||
colorize(u'unreachable', t['unreachable'], None),
|
||||
colorize(u'failed', t['failures'], None)),
|
||||
log_only=True
|
||||
)
|
||||
|
||||
self._display.display("", screen_only=True)
|
||||
|
||||
|
||||
@@ -50,9 +50,9 @@ class CallbackModule(CallbackBase):
|
||||
CALLBACK_NAME = 'hipchat'
|
||||
CALLBACK_NEEDS_WHITELIST = True
|
||||
|
||||
def __init__(self, display):
|
||||
def __init__(self):
|
||||
|
||||
super(CallbackModule, self).__init__(display)
|
||||
super(CallbackModule, self).__init__()
|
||||
|
||||
if not HAS_PRETTYTABLE:
|
||||
self.disabled = True
|
||||
|
||||
@@ -45,9 +45,9 @@ class CallbackModule(CallbackBase):
|
||||
TIME_FORMAT="%b %d %Y %H:%M:%S"
|
||||
MSG_FORMAT="%(now)s - %(category)s - %(data)s\n\n"
|
||||
|
||||
def __init__(self, display):
|
||||
def __init__(self):
|
||||
|
||||
super(CallbackModule, self).__init__(display)
|
||||
super(CallbackModule, self).__init__()
|
||||
|
||||
if not os.path.exists("/var/log/ansible/hosts"):
|
||||
os.makedirs("/var/log/ansible/hosts")
|
||||
|
||||
345
lib/ansible/plugins/callback/logentries.py
Normal file
345
lib/ansible/plugins/callback/logentries.py
Normal file
@@ -0,0 +1,345 @@
|
||||
""" (c) 2015, Logentries.com, Jimmy Tang <jimmy.tang@logentries.com>
|
||||
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
This callback plugin will generate json objects to be sent to logentries
|
||||
for auditing/debugging purposes.
|
||||
|
||||
Todo:
|
||||
|
||||
* Better formatting of output before sending out to logentries data/api nodes.
|
||||
|
||||
To use:
|
||||
|
||||
Add this to your ansible.cfg file in the defaults block
|
||||
|
||||
[defaults]
|
||||
callback_plugins = ./callback_plugins
|
||||
callback_stdout = logentries
|
||||
callback_whitelist = logentries
|
||||
|
||||
Copy the callback plugin into the callback_plugins directory
|
||||
|
||||
Either set the environment variables
|
||||
|
||||
export LOGENTRIES_API=data.logentries.com
|
||||
export LOGENTRIES_PORT=10000
|
||||
export LOGENTRIES_ANSIBLE_TOKEN=dd21fc88-f00a-43ff-b977-e3a4233c53af
|
||||
|
||||
Or create a logentries.ini config file that sites next to the plugin with the following contents
|
||||
|
||||
[logentries]
|
||||
api = data.logentries.com
|
||||
port = 10000
|
||||
tls_port = 20000
|
||||
use_tls = no
|
||||
token = dd21fc88-f00a-43ff-b977-e3a4233c53af
|
||||
flatten = False
|
||||
|
||||
|
||||
"""
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import os
|
||||
import socket
|
||||
import random
|
||||
import time
|
||||
import codecs
|
||||
import ConfigParser
|
||||
import uuid
|
||||
try:
|
||||
import certifi
|
||||
HAS_CERTIFI = True
|
||||
except ImportError:
|
||||
HAS_CERTIFI = False
|
||||
|
||||
try:
|
||||
import flatdict
|
||||
HAS_FLATDICT = True
|
||||
except ImportError:
|
||||
HAS_FLATDICT = False
|
||||
|
||||
from ansible.plugins.callback import CallbackBase
|
||||
|
||||
|
||||
def to_unicode(ch):
|
||||
return codecs.unicode_escape_decode(ch)[0]
|
||||
|
||||
|
||||
def is_unicode(ch):
|
||||
return isinstance(ch, unicode)
|
||||
|
||||
|
||||
def create_unicode(ch):
|
||||
return unicode(ch, 'utf-8')
|
||||
|
||||
|
||||
class PlainTextSocketAppender(object):
|
||||
def __init__(self,
|
||||
verbose=True,
|
||||
LE_API='data.logentries.com',
|
||||
LE_PORT=80,
|
||||
LE_TLS_PORT=443):
|
||||
|
||||
self.LE_API = LE_API
|
||||
self.LE_PORT = LE_PORT
|
||||
self.LE_TLS_PORT = LE_TLS_PORT
|
||||
self.MIN_DELAY = 0.1
|
||||
self.MAX_DELAY = 10
|
||||
# Error message displayed when an incorrect Token has been detected
|
||||
self.INVALID_TOKEN = ("\n\nIt appears the LOGENTRIES_TOKEN "
|
||||
"parameter you entered is incorrect!\n\n")
|
||||
# Unicode Line separator character \u2028
|
||||
self.LINE_SEP = to_unicode('\u2028')
|
||||
|
||||
self.verbose = verbose
|
||||
self._conn = None
|
||||
|
||||
def open_connection(self):
|
||||
self._conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
||||
self._conn.connect((self.LE_API, self.LE_PORT))
|
||||
|
||||
def reopen_connection(self):
|
||||
self.close_connection()
|
||||
|
||||
root_delay = self.MIN_DELAY
|
||||
while True:
|
||||
try:
|
||||
self.open_connection()
|
||||
return
|
||||
except Exception:
|
||||
if self.verbose:
|
||||
self._display.warning("Unable to connect to Logentries")
|
||||
|
||||
root_delay *= 2
|
||||
if (root_delay > self.MAX_DELAY):
|
||||
root_delay = self.MAX_DELAY
|
||||
|
||||
wait_for = root_delay + random.uniform(0, root_delay)
|
||||
|
||||
try:
|
||||
time.sleep(wait_for)
|
||||
except KeyboardInterrupt:
|
||||
raise
|
||||
|
||||
def close_connection(self):
|
||||
if self._conn is not None:
|
||||
self._conn.close()
|
||||
|
||||
def put(self, data):
|
||||
# Replace newlines with Unicode line separator
|
||||
# for multi-line events
|
||||
if not is_unicode(data):
|
||||
multiline = create_unicode(data).replace('\n', self.LINE_SEP)
|
||||
else:
|
||||
multiline = data.replace('\n', self.LINE_SEP)
|
||||
multiline += "\n"
|
||||
# Send data, reconnect if needed
|
||||
while True:
|
||||
try:
|
||||
self._conn.send(multiline.encode('utf-8'))
|
||||
except socket.error:
|
||||
self.reopen_connection()
|
||||
continue
|
||||
break
|
||||
|
||||
self.close_connection()
|
||||
|
||||
|
||||
try:
|
||||
import ssl
|
||||
HAS_SSL=True
|
||||
except ImportError: # for systems without TLS support.
|
||||
SocketAppender = PlainTextSocketAppender
|
||||
HAS_SSL=False
|
||||
else:
|
||||
|
||||
class TLSSocketAppender(PlainTextSocketAppender):
|
||||
def open_connection(self):
|
||||
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
||||
sock = ssl.wrap_socket(
|
||||
sock=sock,
|
||||
keyfile=None,
|
||||
certfile=None,
|
||||
server_side=False,
|
||||
cert_reqs=ssl.CERT_REQUIRED,
|
||||
ssl_version=getattr(
|
||||
ssl, 'PROTOCOL_TLSv1_2', ssl.PROTOCOL_TLSv1),
|
||||
ca_certs=certifi.where(),
|
||||
do_handshake_on_connect=True,
|
||||
suppress_ragged_eofs=True, )
|
||||
sock.connect((self.LE_API, self.LE_TLS_PORT))
|
||||
self._conn = sock
|
||||
|
||||
SocketAppender = TLSSocketAppender
|
||||
|
||||
|
||||
class CallbackModule(CallbackBase):
|
||||
CALLBACK_VERSION = 2.0
|
||||
CALLBACK_TYPE = 'notification'
|
||||
CALLBACK_NAME = 'logentries'
|
||||
CALLBACK_NEEDS_WHITELIST = True
|
||||
|
||||
def __init__(self):
|
||||
super(CallbackModule, self).__init__()
|
||||
|
||||
if not HAS_SSL:
|
||||
self._display.warning("Unable to import ssl module. Will send over port 80.")
|
||||
|
||||
if not HAS_CERTIFI:
|
||||
self.disabled =True
|
||||
self._display.warning('The `certifi` python module is not installed. '
|
||||
'Disabling the Logentries callback plugin.')
|
||||
|
||||
if not HAS_FLATDICT:
|
||||
self.disabled =True
|
||||
self._display.warning('The `flatdict` python module is not installed. '
|
||||
'Disabling the Logentries callback plugin.')
|
||||
|
||||
config_path = os.path.abspath(os.path.dirname(__file__))
|
||||
config = ConfigParser.ConfigParser()
|
||||
try:
|
||||
config.readfp(open(os.path.join(config_path, 'logentries.ini')))
|
||||
if config.has_option('logentries', 'api'):
|
||||
self.api_uri = config.get('logentries', 'api')
|
||||
if config.has_option('logentries', 'port'):
|
||||
self.api_port = config.getint('logentries', 'port')
|
||||
if config.has_option('logentries', 'tls_port'):
|
||||
self.api_tls_port = config.getint('logentries', 'tls_port')
|
||||
if config.has_option('logentries', 'use_tls'):
|
||||
self.use_tls = config.getboolean('logentries', 'use_tls')
|
||||
if config.has_option('logentries', 'token'):
|
||||
self.token = config.get('logentries', 'token')
|
||||
if config.has_option('logentries', 'flatten'):
|
||||
self.flatten = config.getboolean('logentries', 'flatten')
|
||||
|
||||
except:
|
||||
self.api_uri = os.getenv('LOGENTRIES_API')
|
||||
if self.api_uri is None:
|
||||
self.api_uri = 'data.logentries.com'
|
||||
|
||||
try:
|
||||
self.api_port = int(os.getenv('LOGENTRIES_PORT'))
|
||||
if self.api_port is None:
|
||||
self.api_port = 80
|
||||
except TypeError:
|
||||
self.api_port = 80
|
||||
|
||||
try:
|
||||
self.api_tls_port = int(os.getenv('LOGENTRIES_TLS_PORT'))
|
||||
if self.api_tls_port is None:
|
||||
self.api_tls_port = 443
|
||||
except TypeError:
|
||||
self.api_tls_port = 443
|
||||
|
||||
# this just needs to be set to use TLS
|
||||
self.use_tls = os.getenv('LOGENTRIES_USE_TLS')
|
||||
if self.use_tls is None:
|
||||
self.use_tls = False
|
||||
elif self.use_tls.lower() in ['yes', 'true']:
|
||||
self.use_tls = True
|
||||
|
||||
self.token = os.getenv('LOGENTRIES_ANSIBLE_TOKEN')
|
||||
if self.token is None:
|
||||
self.disabled = True
|
||||
self._display.warning('Logentries token could not be loaded. The logentries token can be provided using the `LOGENTRIES_TOKEN` environment variable')
|
||||
|
||||
self.flatten = os.getenv('LOGENTRIES_FLATTEN')
|
||||
if self.flatten is None:
|
||||
self.flatten = False
|
||||
elif self.flatten.lower() in ['yes', 'true']:
|
||||
self.flatten = True
|
||||
|
||||
self.verbose = False
|
||||
self.timeout = 10
|
||||
self.le_jobid = str(uuid.uuid4())
|
||||
|
||||
if self.use_tls:
|
||||
self._appender = TLSSocketAppender(verbose=self.verbose,
|
||||
LE_API=self.api_uri,
|
||||
LE_TLS_PORT=self.api_tls_port)
|
||||
else:
|
||||
self._appender = PlainTextSocketAppender(verbose=self.verbose,
|
||||
LE_API=self.api_uri,
|
||||
LE_PORT=self.api_port)
|
||||
self._appender.reopen_connection()
|
||||
|
||||
def emit_formatted(self, record):
|
||||
if self.flatten:
|
||||
results = flatdict.FlatDict(record)
|
||||
self.emit(self._dump_results(results))
|
||||
else:
|
||||
self.emit(self._dump_results(record))
|
||||
|
||||
def emit(self, record):
|
||||
msg = record.rstrip('\n')
|
||||
msg = "{} {}".format(self.token, msg)
|
||||
self._appender.put(msg)
|
||||
|
||||
def runner_on_ok(self, host, res):
|
||||
results = {}
|
||||
results['le_jobid'] = self.le_jobid
|
||||
results['hostname'] = host
|
||||
results['results'] = res
|
||||
results['status'] = 'OK'
|
||||
self.emit_formatted(results)
|
||||
|
||||
def runner_on_failed(self, host, res, ignore_errors=False):
|
||||
results = {}
|
||||
results['le_jobid'] = self.le_jobid
|
||||
results['hostname'] = host
|
||||
results['results'] = res
|
||||
results['status'] = 'FAILED'
|
||||
self.emit_formatted(results)
|
||||
|
||||
def runner_on_skipped(self, host, item=None):
|
||||
results = {}
|
||||
results['le_jobid'] = self.le_jobid
|
||||
results['hostname'] = host
|
||||
results['status'] = 'SKIPPED'
|
||||
self.emit_formatted(results)
|
||||
|
||||
def runner_on_unreachable(self, host, res):
|
||||
results = {}
|
||||
results['le_jobid'] = self.le_jobid
|
||||
results['hostname'] = host
|
||||
results['results'] = res
|
||||
results['status'] = 'UNREACHABLE'
|
||||
self.emit_formatted(results)
|
||||
|
||||
def runner_on_async_failed(self, host, res, jid):
|
||||
results = {}
|
||||
results['le_jobid'] = self.le_jobid
|
||||
results['hostname'] = host
|
||||
results['results'] = res
|
||||
results['jid'] = jid
|
||||
results['status'] = 'ASYNC_FAILED'
|
||||
self.emit_formatted(results)
|
||||
|
||||
def v2_playbook_on_play_start(self, play):
|
||||
results = {}
|
||||
results['le_jobid'] = self.le_jobid
|
||||
results['started_by'] = os.getlogin()
|
||||
if play.name:
|
||||
results['play'] = play.name
|
||||
results['hosts'] = play.hosts
|
||||
self.emit_formatted(results)
|
||||
|
||||
def playbook_on_stats(self, stats):
|
||||
""" close connection """
|
||||
self._appender.close_connection()
|
||||
@@ -53,28 +53,32 @@ class CallbackModule(CallbackBase):
|
||||
else:
|
||||
msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception']
|
||||
|
||||
self._display.display(msg, color='red')
|
||||
self._display.display(msg, color=C.COLOR_ERROR)
|
||||
|
||||
# finally, remove the exception from the result so it's not shown every time
|
||||
del result._result['exception']
|
||||
|
||||
if result._task.action in C.MODULE_NO_JSON:
|
||||
self._display.display(self._command_generic_msg(result._host.get_name(), result._result, "FAILED"), color='red')
|
||||
self._display.display(self._command_generic_msg(result._host.get_name(), result._result, "FAILED"), color=C.COLOR_ERROR)
|
||||
else:
|
||||
self._display.display("%s | FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color='red')
|
||||
self._display.display("%s | FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color=C.COLOR_ERROR)
|
||||
|
||||
def v2_runner_on_ok(self, result):
|
||||
self._clean_results(result._result, result._task.action)
|
||||
if result._task.action in C.MODULE_NO_JSON:
|
||||
self._display.display(self._command_generic_msg(result._host.get_name(), result._result, "SUCCESS"), color='green')
|
||||
self._display.display(self._command_generic_msg(result._host.get_name(), result._result, "SUCCESS"), color=C.COLOR_OK)
|
||||
else:
|
||||
self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color='green')
|
||||
if 'changed' in result._result and result._result['changed']:
|
||||
self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color=C.COLOR_CHANGED)
|
||||
else:
|
||||
self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color=C.COLOR_OK)
|
||||
self._handle_warnings(result._result)
|
||||
|
||||
def v2_runner_on_skipped(self, result):
|
||||
self._display.display("%s | SKIPPED" % (result._host.get_name()), color='cyan')
|
||||
self._display.display("%s | SKIPPED" % (result._host.get_name()), color=C.COLOR_SKIP)
|
||||
|
||||
def v2_runner_on_unreachable(self, result):
|
||||
self._display.display("%s | UNREACHABLE! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color='yellow')
|
||||
self._display.display("%s | UNREACHABLE! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color=C.COLOR_UNREACHABLE)
|
||||
|
||||
def v2_on_file_diff(self, result):
|
||||
if 'diff' in result._result and result._result['diff']:
|
||||
|
||||
@@ -52,24 +52,24 @@ class CallbackModule(CallbackBase):
|
||||
msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception'].replace('\n','')
|
||||
|
||||
if result._task.action in C.MODULE_NO_JSON:
|
||||
self._display.display(self._command_generic_msg(result._host.get_name(), result._result,'FAILED'), color='red')
|
||||
self._display.display(self._command_generic_msg(result._host.get_name(), result._result,'FAILED'), color=C.COLOR_ERROR)
|
||||
else:
|
||||
self._display.display(msg, color='red')
|
||||
self._display.display(msg, color=C.COLOR_ERROR)
|
||||
|
||||
# finally, remove the exception from the result so it's not shown every time
|
||||
del result._result['exception']
|
||||
|
||||
self._display.display("%s | FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=0).replace('\n','')), color='red')
|
||||
self._display.display("%s | FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=0).replace('\n','')), color=C.COLOR_ERROR)
|
||||
|
||||
def v2_runner_on_ok(self, result):
|
||||
if result._task.action in C.MODULE_NO_JSON:
|
||||
self._display.display(self._command_generic_msg(result._host.get_name(), result._result,'SUCCESS'), color='green')
|
||||
self._display.display(self._command_generic_msg(result._host.get_name(), result._result,'SUCCESS'), color=C.COLOR_OK)
|
||||
else:
|
||||
self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), self._dump_results(result._result, indent=0).replace('\n','')), color='green')
|
||||
self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), self._dump_results(result._result, indent=0).replace('\n','')), color=C.COLOR_OK)
|
||||
|
||||
|
||||
def v2_runner_on_unreachable(self, result):
|
||||
self._display.display("%s | UNREACHABLE!" % result._host.get_name(), color='yellow')
|
||||
self._display.display("%s | UNREACHABLE!" % result._host.get_name(), color=C.COLOR_UNREACHABLE)
|
||||
|
||||
def v2_runner_on_skipped(self, result):
|
||||
self._display.display("%s | SKIPPED" % (result._host.get_name()), color='cyan')
|
||||
self._display.display("%s | SKIPPED" % (result._host.get_name()), color=C.COLOR_SKIP)
|
||||
|
||||
@@ -40,9 +40,9 @@ class CallbackModule(CallbackBase):
|
||||
CALLBACK_NAME = 'osx_say'
|
||||
CALLBACK_NEEDS_WHITELIST = True
|
||||
|
||||
def __init__(self, display):
|
||||
def __init__(self):
|
||||
|
||||
super(CallbackModule, self).__init__(display)
|
||||
super(CallbackModule, self).__init__()
|
||||
|
||||
# plugin disable itself if say is not present
|
||||
# ansible will not call any callback if disabled is set to True
|
||||
|
||||
@@ -71,11 +71,11 @@ class CallbackModule(CallbackBase):
|
||||
CALLBACK_NAME = 'profile_tasks'
|
||||
CALLBACK_NEEDS_WHITELIST = True
|
||||
|
||||
def __init__(self, display):
|
||||
def __init__(self):
|
||||
self.stats = {}
|
||||
self.current = None
|
||||
|
||||
super(CallbackModule, self).__init__(display)
|
||||
super(CallbackModule, self).__init__()
|
||||
|
||||
def _record_task(self, name):
|
||||
"""
|
||||
|
||||
@@ -19,9 +19,9 @@
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
from ansible.plugins.callback import CallbackBase
|
||||
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
|
||||
|
||||
class CallbackModule(CallbackBase):
|
||||
class CallbackModule(CallbackModule_default):
|
||||
|
||||
'''
|
||||
This is the default callback interface, which simply prints messages
|
||||
@@ -32,130 +32,8 @@ class CallbackModule(CallbackBase):
|
||||
CALLBACK_TYPE = 'stdout'
|
||||
CALLBACK_NAME = 'skippy'
|
||||
|
||||
def v2_runner_on_failed(self, result, ignore_errors=False):
|
||||
if 'exception' in result._result:
|
||||
if self._display.verbosity < 3:
|
||||
# extract just the actual error message from the exception text
|
||||
error = result._result['exception'].strip().split('\n')[-1]
|
||||
msg = "An exception occurred during task execution. To see the full traceback, use -vvv. The error was: %s" % error
|
||||
else:
|
||||
msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception']
|
||||
|
||||
self._display.display(msg, color='red')
|
||||
|
||||
# finally, remove the exception from the result so it's not shown every time
|
||||
del result._result['exception']
|
||||
|
||||
if result._task.loop and 'results' in result._result:
|
||||
self._process_items(result)
|
||||
else:
|
||||
if result._task.delegate_to:
|
||||
self._display.display("fatal: [%s -> %s]: FAILED! => %s" % (result._host.get_name(), result._task.delegate_to, self._dump_results(result._result)), color='red')
|
||||
else:
|
||||
self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result)), color='red')
|
||||
|
||||
if result._task.ignore_errors:
|
||||
self._display.display("...ignoring", color='cyan')
|
||||
|
||||
def v2_runner_on_ok(self, result):
|
||||
|
||||
if result._task.action == 'include':
|
||||
msg = 'included: %s for %s' % (result._task.args.get('_raw_params'), result._host.name)
|
||||
color = 'cyan'
|
||||
elif result._result.get('changed', False):
|
||||
if result._task.delegate_to is not None:
|
||||
msg = "changed: [%s -> %s]" % (result._host.get_name(), result._task.delegate_to)
|
||||
else:
|
||||
msg = "changed: [%s]" % result._host.get_name()
|
||||
color = 'yellow'
|
||||
else:
|
||||
if result._task.delegate_to is not None:
|
||||
msg = "ok: [%s -> %s]" % (result._host.get_name(), result._task.delegate_to)
|
||||
else:
|
||||
msg = "ok: [%s]" % result._host.get_name()
|
||||
color = 'green'
|
||||
|
||||
if result._task.loop and 'results' in result._result:
|
||||
self._process_items(result)
|
||||
else:
|
||||
|
||||
if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result and result._task.action != 'include':
|
||||
msg += " => %s" % (self._dump_results(result._result),)
|
||||
self._display.display(msg, color=color)
|
||||
|
||||
self._handle_warnings(result._result)
|
||||
|
||||
|
||||
def v2_runner_on_unreachable(self, result):
|
||||
if result._task.delegate_to:
|
||||
self._display.display("fatal: [%s -> %s]: UNREACHABLE! => %s" % (result._host.get_name(), result._task.delegate_to, self._dump_results(result._result)), color='red')
|
||||
else:
|
||||
self._display.display("fatal: [%s]: UNREACHABLE! => %s" % (result._host.get_name(), self._dump_results(result._result)), color='red')
|
||||
|
||||
def v2_playbook_on_no_hosts_matched(self):
|
||||
self._display.display("skipping: no hosts matched", color='cyan')
|
||||
|
||||
def v2_playbook_on_no_hosts_remaining(self):
|
||||
self._display.banner("NO MORE HOSTS LEFT")
|
||||
|
||||
def v2_playbook_on_task_start(self, task, is_conditional):
|
||||
self._display.banner("TASK [%s]" % task.get_name().strip())
|
||||
if self._display.verbosity > 2:
|
||||
path = task.get_path()
|
||||
if path:
|
||||
self._display.display("task path: %s" % path, color='dark gray')
|
||||
|
||||
def v2_playbook_on_cleanup_task_start(self, task):
|
||||
self._display.banner("CLEANUP TASK [%s]" % task.get_name().strip())
|
||||
|
||||
def v2_playbook_on_handler_task_start(self, task):
|
||||
self._display.banner("RUNNING HANDLER [%s]" % task.get_name().strip())
|
||||
|
||||
def v2_playbook_on_play_start(self, play):
|
||||
name = play.get_name().strip()
|
||||
if not name:
|
||||
msg = "PLAY"
|
||||
else:
|
||||
msg = "PLAY [%s]" % name
|
||||
|
||||
self._display.banner(msg)
|
||||
|
||||
def v2_on_file_diff(self, result):
|
||||
if 'diff' in result._result and result._result['diff']:
|
||||
self._display.display(self._get_diff(result._result['diff']))
|
||||
|
||||
def v2_playbook_item_on_ok(self, result):
|
||||
|
||||
if result._task.action == 'include':
|
||||
msg = 'included: %s for %s' % (result._task.args.get('_raw_params'), result._host.name)
|
||||
color = 'cyan'
|
||||
elif result._result.get('changed', False):
|
||||
msg = "changed: [%s]" % result._host.get_name()
|
||||
color = 'yellow'
|
||||
else:
|
||||
msg = "ok: [%s]" % result._host.get_name()
|
||||
color = 'green'
|
||||
|
||||
msg += " => (item=%s)" % (result._result['item'],)
|
||||
|
||||
if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result and result._task.action != 'include':
|
||||
msg += " => %s" % self._dump_results(result._result)
|
||||
self._display.display(msg, color=color)
|
||||
|
||||
def v2_playbook_item_on_failed(self, result):
|
||||
if 'exception' in result._result:
|
||||
if self._display.verbosity < 3:
|
||||
# extract just the actual error message from the exception text
|
||||
error = result._result['exception'].strip().split('\n')[-1]
|
||||
msg = "An exception occurred during task execution. To see the full traceback, use -vvv. The error was: %s" % error
|
||||
else:
|
||||
msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception']
|
||||
|
||||
self._display.display(msg, color='red')
|
||||
|
||||
# finally, remove the exception from the result so it's not shown every time
|
||||
del result._result['exception']
|
||||
|
||||
self._display.display("failed: [%s] => (item=%s) => %s" % (result._host.get_name(), result._result['item'], self._dump_results(result._result)), color='red')
|
||||
self._handle_warnings(result._result)
|
||||
def v2_runner_on_skipped(self, result):
|
||||
pass
|
||||
|
||||
def v2_playbook_item_on_skipped(self, result):
|
||||
pass
|
||||
|
||||
@@ -28,9 +28,9 @@ class CallbackModule(CallbackBase):
|
||||
CALLBACK_NAME = 'syslog_json'
|
||||
CALLBACK_NEEDS_WHITELIST = True
|
||||
|
||||
def __init__(self, display):
|
||||
def __init__(self):
|
||||
|
||||
super(CallbackModule, self).__init__(display)
|
||||
super(CallbackModule, self).__init__()
|
||||
|
||||
self.logger = logging.getLogger('ansible logger')
|
||||
self.logger.setLevel(logging.DEBUG)
|
||||
|
||||
@@ -16,9 +16,9 @@ class CallbackModule(CallbackBase):
|
||||
CALLBACK_NAME = 'timer'
|
||||
CALLBACK_NEEDS_WHITELIST = True
|
||||
|
||||
def __init__(self, display):
|
||||
def __init__(self):
|
||||
|
||||
super(CallbackModule, self).__init__(display)
|
||||
super(CallbackModule, self).__init__()
|
||||
|
||||
self.start_time = datetime.now()
|
||||
|
||||
|
||||
@@ -36,12 +36,13 @@ class CallbackModule(CallbackBase):
|
||||
CALLBACK_NAME = 'tree'
|
||||
CALLBACK_NEEDS_WHITELIST = True
|
||||
|
||||
def __init__(self, display):
|
||||
super(CallbackModule, self).__init__(display)
|
||||
def __init__(self):
|
||||
super(CallbackModule, self).__init__()
|
||||
|
||||
self.tree = TREE_DIR
|
||||
if not self.tree:
|
||||
self._display.warnings("Disabling tree callback, invalid directory provided to tree option: %s" % self.tree)
|
||||
self.tree = os.path.expanduser("~/.ansible/tree")
|
||||
self._display.warning("The tree callback is defaulting to ~/.ansible/tree, as an invalid directory was provided: %s" % self.tree)
|
||||
|
||||
def write_tree_file(self, hostname, buf):
|
||||
''' write something into treedir/hostname '''
|
||||
@@ -53,7 +54,7 @@ class CallbackModule(CallbackBase):
|
||||
with open(path, 'wb+') as fd:
|
||||
fd.write(buf)
|
||||
except (OSError, IOError) as e:
|
||||
self._display.warnings("Unable to write to %s's file: %s" % (hostname, str(e)))
|
||||
self._display.warning("Unable to write to %s's file: %s" % (hostname, str(e)))
|
||||
|
||||
def result_to_tree(self, result):
|
||||
if self.tree:
|
||||
|
||||
@@ -75,6 +75,7 @@ class ConnectionBase(with_metaclass(ABCMeta, object)):
|
||||
|
||||
self.success_key = None
|
||||
self.prompt = None
|
||||
self._connected = False
|
||||
|
||||
# load the shell plugin for this action/connection
|
||||
if play_context.shell:
|
||||
@@ -88,6 +89,11 @@ class ConnectionBase(with_metaclass(ABCMeta, object)):
|
||||
if not self._shell:
|
||||
raise AnsibleError("Invalid shell type specified (%s), or the plugin for that shell type is missing." % shell_type)
|
||||
|
||||
@property
|
||||
def connected(self):
|
||||
'''Read-only property holding whether the connection to the remote host is active or closed.'''
|
||||
return self._connected
|
||||
|
||||
def _become_method_supported(self):
|
||||
''' Checks if the current class supports this privilege escalation method '''
|
||||
|
||||
@@ -200,7 +206,10 @@ class ConnectionBase(with_metaclass(ABCMeta, object)):
|
||||
pass
|
||||
|
||||
def check_become_success(self, output):
|
||||
return self._play_context.success_key == output.rstrip()
|
||||
for line in output.splitlines(True):
|
||||
if self._play_context.success_key == line.rstrip():
|
||||
return True
|
||||
return False
|
||||
|
||||
def check_password_prompt(self, output):
|
||||
if self._play_context.prompt is None:
|
||||
|
||||
@@ -30,6 +30,7 @@ from ansible import constants as C
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.plugins.connection import ConnectionBase
|
||||
from ansible.module_utils.basic import is_executable
|
||||
from ansible.utils.unicode import to_bytes
|
||||
|
||||
try:
|
||||
from __main__ import display
|
||||
@@ -90,6 +91,7 @@ class Connection(ConnectionBase):
|
||||
local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd]
|
||||
|
||||
display.vvv("EXEC %s" % (local_cmd), host=self.chroot)
|
||||
local_cmd = map(to_bytes, local_cmd)
|
||||
p = subprocess.Popen(local_cmd, shell=False, stdin=stdin,
|
||||
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user