whitespace + remove deprecated YAML parser (migration script lives in
examples/scripts and warning was added in 0.6 release)
This commit is contained in:
Michael DeHaan
2012-08-06 20:07:02 -04:00
parent 0810f26095
commit faed4b5a33
36 changed files with 306 additions and 450 deletions

View File

@@ -29,8 +29,8 @@ elif os.path.exists("/usr/games/cowsay"):
cowsay = "/usr/games/cowsay"
class AggregateStats(object):
''' holds stats about per-host activity during playbook runs '''
''' holds stats about per-host activity during playbook runs '''
def __init__(self):
self.processed = {}
@@ -49,7 +49,7 @@ class AggregateStats(object):
def compute(self, runner_results, setup=False, poll=False):
''' walk through all results and increment stats '''
for (host, value) in runner_results.get('contacted', {}).iteritems():
if ('failed' in value and bool(value['failed'])) or ('rc' in value and value['rc'] != 0):
self._increment('failures', host)
@@ -65,7 +65,7 @@ class AggregateStats(object):
for (host, value) in runner_results.get('dark', {}).iteritems():
self._increment('dark', host)
def summarize(self, host):
''' return information about a particular host '''
@@ -92,10 +92,10 @@ def regular_generic_msg(hostname, result, oneline, caption):
def banner(msg):
if cowsay != None:
cmd = subprocess.Popen("%s -W 60 \"%s\"" % (cowsay, msg),
cmd = subprocess.Popen("%s -W 60 \"%s\"" % (cowsay, msg),
stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
(out, err) = cmd.communicate()
return "%s\n" % out
return "%s\n" % out
else:
return "\n%s ********************* " % msg
@@ -182,7 +182,7 @@ class CliRunnerCallbacks(DefaultRunnerCallbacks):
def __init__(self):
# set by /usr/bin/ansible later
self.options = None
self.options = None
self._async_notified = {}
def on_failed(self, host, res, ignore_errors=False):
@@ -192,7 +192,7 @@ class CliRunnerCallbacks(DefaultRunnerCallbacks):
def on_ok(self, host, res):
self._on_any(host,res)
def on_unreachable(self, host, res):
if type(res) == dict:
@@ -200,17 +200,17 @@ class CliRunnerCallbacks(DefaultRunnerCallbacks):
print "%s | FAILED => %s" % (host, res)
if self.options.tree:
utils.write_tree_file(
self.options.tree, host,
self.options.tree, host,
utils.jsonify(dict(failed=True, msg=res),format=True)
)
def on_skipped(self, host):
pass
def on_error(self, host, err):
print >>sys.stderr, "err: [%s] => %s\n" % (host, err)
def on_no_hosts(self):
print >>sys.stderr, "no hosts matched\n"
@@ -277,11 +277,11 @@ class PlaybookRunnerCallbacks(DefaultRunnerCallbacks):
item = host_result.get('item', None)
# show verbose output for non-setup module results if --verbose is used
msg = ''
msg = ''
if not self.verbose or host_result.get("verbose_override",None) is not None:
if item:
msg = "ok: [%s] => (item=%s)" % (host,item)
else:
else:
if 'ansible_job_id' not in host_result or 'finished' in host_result:
msg = "ok: [%s]" % (host)
else:
@@ -347,7 +347,7 @@ class PlaybookRunnerCallbacks(DefaultRunnerCallbacks):
class PlaybookCallbacks(object):
''' playbook.py callbacks used by /usr/bin/ansible-playbook '''
def __init__(self, verbose=False):
self.verbose = verbose
@@ -376,11 +376,11 @@ class PlaybookCallbacks(object):
if private:
return getpass.getpass(msg)
return raw_input(msg)
def on_setup(self):
print banner("GATHERING FACTS")
def on_import_for_host(self, host, imported_file):
msg = "%s: importing %s" % (host, imported_file)

View File

@@ -20,7 +20,7 @@ class AnsibleError(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg

View File

@@ -23,7 +23,6 @@ import os
import subprocess
import ansible.constants as C
from ansible.inventory.ini import InventoryParser
from ansible.inventory.yaml import InventoryParserYaml
from ansible.inventory.script import InventoryScript
from ansible.inventory.group import Group
from ansible.inventory.host import Host
@@ -31,12 +30,12 @@ from ansible import errors
from ansible import utils
class Inventory(object):
"""
"""
Host inventory for ansible.
"""
__slots__ = [ 'host_list', 'groups', '_restriction', '_is_script',
'parser', '_vars_per_host', '_vars_per_group', '_hosts_cache' ]
'parser', '_vars_per_host', '_vars_per_group', '_hosts_cache' ]
def __init__(self, host_list=C.DEFAULT_HOST_LIST):
@@ -46,14 +45,14 @@ class Inventory(object):
# caching to avoid repeated calculations, particularly with
# external inventory scripts.
self._vars_per_host = {}
self._vars_per_group = {}
self._hosts_cache = {}
# the inventory object holds a list of groups
self.groups = []
# a list of host(names) to contain current inquiries to
self._restriction = None
@@ -83,10 +82,9 @@ class Inventory(object):
if not data.startswith("---"):
self.parser = InventoryParser(filename=host_list)
self.groups = self.parser.groups.values()
else:
self.parser = InventoryParserYaml(filename=host_list)
self.groups = self.parser.groups.values()
else:
raise errors.AnsibleError("YAML inventory support is deprecated in 0.6 and removed in 0.7, see the migration script in examples/scripts in the git checkout")
def _match(self, str, pattern_str):
return fnmatch.fnmatch(str, pattern_str)
@@ -107,7 +105,7 @@ class Inventory(object):
for host in group.get_hosts():
if self._match(group.name, pat) or pat == 'all' or self._match(host.name, pat):
# must test explicitly for None because [] means no hosts allowed
if self._restriction==None or host.name in self._restriction:
if self._restriction==None or host.name in self._restriction:
if inverted:
if host.name in hosts:
del hosts[host.name]
@@ -135,7 +133,7 @@ class Inventory(object):
if group.name == groupname:
return group
return None
def get_group_variables(self, groupname):
if groupname not in self._vars_per_group:
self._vars_per_group[groupname] = self._get_group_variables(groupname)
@@ -157,8 +155,8 @@ class Inventory(object):
if self._is_script:
host = self.get_host(hostname)
cmd = subprocess.Popen(
[self.host_list,"--host",hostname],
stdout=subprocess.PIPE,
[self.host_list,"--host",hostname],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
(out, err) = cmd.communicate()
@@ -184,7 +182,7 @@ class Inventory(object):
return [ h.name for h in self.get_hosts(pattern) ]
def list_groups(self):
return [ g.name for g in self.groups ]
return [ g.name for g in self.groups ]
def get_restriction(self):
return self._restriction

View File

@@ -40,15 +40,15 @@ def detect_range(line = None):
Returns True if the given line contains a pattern, else False.
'''
if (not line.startswith("[") and
line.find("[") != -1 and
if (not line.startswith("[") and
line.find("[") != -1 and
line.find(":") != -1 and
line.find("]") != -1 and
line.index("[") < line.index(":") < line.index("]")):
line.index("[") < line.index(":") < line.index("]")):
return True
else:
return False
def expand_hostname_range(line = None):
'''
A helper function that expands a given line that contains a pattern
@@ -64,11 +64,11 @@ def expand_hostname_range(line = None):
all_hosts = []
if line:
# A hostname such as db[1:6]-node is considered to consist of
# three parts:
# three parts:
# head: 'db'
# nrange: [1:6]; range() is a built-in. Can't use the name
# tail: '-node'
(head, nrange, tail) = line.replace('[','|').replace(']','|').split('|')
bounds = nrange.split(":")
if len(bounds) != 2:
@@ -85,7 +85,7 @@ def expand_hostname_range(line = None):
rlen = None
if rlen > 1 and rlen != len(end):
raise errors.AnsibleError("host range format incorrectly specified!")
for _ in range(int(beg), int(end)+1):
if rlen:
rseq = str(_).zfill(rlen) # range sequence
@@ -93,5 +93,5 @@ def expand_hostname_range(line = None):
rseq = str(_)
hname = ''.join((head, rseq, tail))
all_hosts.append(hname)
return all_hosts

View File

@@ -52,7 +52,7 @@ class Group(object):
for kid in self.child_groups:
hosts.extend(kid.get_hosts())
hosts.extend(self.hosts)
return hosts
return hosts
def get_variables(self):

View File

@@ -49,7 +49,7 @@ class Host(object):
groups[g.name] = g
ancestors = g.get_ancestors()
for a in ancestors:
groups[a.name] = a
groups[a.name] = a
return groups.values()
def get_variables(self):

View File

@@ -30,7 +30,7 @@ from ansible import errors
from ansible import utils
class InventoryParser(object):
"""
"""
Host inventory for ansible.
"""
@@ -41,20 +41,20 @@ class InventoryParser(object):
self.groups = {}
self.hosts = {}
self._parse()
def _parse(self):
self._parse_base_groups()
self._parse_group_children()
self._parse_group_variables()
return self.groups
# [webservers]
# alpha
# beta:2345
# gamma sudo=True user=root
# delta asdf=jkl favcolor=red
# delta asdf=jkl favcolor=red
def _parse_base_groups(self):
# FIXME: refactor
@@ -93,7 +93,7 @@ class InventoryParser(object):
tokens2 = hostname.rsplit(":", 1)
hostname = tokens2[0]
port = tokens2[1]
host = None
_all_hosts = []
if hostname in self.hosts:

View File

@@ -52,5 +52,5 @@ class InventoryScript(object):
# FIXME: hack shouldn't be needed
all.add_host(host)
all.add_child_group(group)
return groups
return groups

View File

@@ -1,142 +0,0 @@
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import ansible.constants as C
from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible import errors
from ansible import utils
import sys
class InventoryParserYaml(object):
''' Host inventory parser for ansible '''
__slots__ = [ '_hosts', 'groups' ]
def __init__(self, filename=C.DEFAULT_HOST_LIST):
    """Load and parse the YAML inventory file *filename*.

    Emits a deprecation warning on stderr first: YAML inventories are
    deprecated in 0.6 and removed in 0.7 (see the yaml_to_ini.py
    migration script referenced in the warning).
    """
    sys.stderr.write("WARNING: YAML inventory files are deprecated in 0.6 and will be removed in 0.7, to migrate" +
        " download and run https://github.com/ansible/ansible/blob/devel/examples/scripts/yaml_to_ini.py\n")
    # read the whole file; the context manager guarantees the handle is closed
    with open(filename) as inventory_file:
        data = inventory_file.read()
    # hostname -> Host cache used by _make_host()
    self._hosts = {}
    self._parse(data)
def _make_host(self, hostname):
    """Return the Host object for *hostname*, creating and caching it on first use."""
    cached = self._hosts.get(hostname)
    if cached is not None:
        # already seen this hostname: reuse the same Host instance
        return cached
    new_host = Host(hostname)
    self._hosts[hostname] = new_host
    return new_host
# see file 'test/yaml_hosts' for syntax
def _parse(self, data):
# FIXME: refactor into subfunctions
all = Group('all')
ungrouped = Group('ungrouped')
all.add_child_group(ungrouped)
self.groups = dict(all=all, ungrouped=ungrouped)
grouped_hosts = []
yaml = utils.parse_yaml(data)
# first add all groups
for item in yaml:
if type(item) == dict and 'group' in item:
group = Group(item['group'])
for subresult in item.get('hosts',[]):
if type(subresult) in [ str, unicode ]:
host = self._make_host(subresult)
group.add_host(host)
grouped_hosts.append(host)
elif type(subresult) == dict:
host = self._make_host(subresult['host'])
vars = subresult.get('vars',{})
if type(vars) == list:
for subitem in vars:
for (k,v) in subitem.items():
host.set_variable(k,v)
elif type(vars) == dict:
for (k,v) in subresult.get('vars',{}).items():
host.set_variable(k,v)
else:
raise errors.AnsibleError("unexpected type for variable")
group.add_host(host)
grouped_hosts.append(host)
vars = item.get('vars',{})
if type(vars) == dict:
for (k,v) in item.get('vars',{}).items():
group.set_variable(k,v)
elif type(vars) == list:
for subitem in vars:
if type(subitem) != dict:
raise errors.AnsibleError("expected a dictionary")
for (k,v) in subitem.items():
group.set_variable(k,v)
self.groups[group.name] = group
all.add_child_group(group)
# add host definitions
for item in yaml:
if type(item) in [ str, unicode ]:
host = self._make_host(item)
if host not in grouped_hosts:
ungrouped.add_host(host)
elif type(item) == dict and 'host' in item:
host = self._make_host(item['host'])
vars = item.get('vars', {})
if type(vars)==list:
varlist, vars = vars, {}
for subitem in varlist:
vars.update(subitem)
for (k,v) in vars.items():
host.set_variable(k,v)
groups = item.get('groups', {})
if type(groups) in [ str, unicode ]:
groups = [ groups ]
if type(groups)==list:
for subitem in groups:
if subitem in self.groups:
group = self.groups[subitem]
else:
group = Group(subitem)
self.groups[group.name] = group
all.add_child_group(group)
group.add_host(host)
grouped_hosts.append(host)
if host not in grouped_hosts:
ungrouped.add_host(host)
# make sure ungrouped.hosts is the complement of grouped_hosts
ungrouped_hosts = [host for host in ungrouped.hosts if host not in grouped_hosts]

View File

@@ -56,7 +56,7 @@ except ImportError:
class AnsibleModule(object):
def __init__(self, argument_spec, bypass_checks=False, no_log=False, check_invalid_arguments=True):
'''
'''
common code for quickly building an ansible module in Python
(although you can write modules in anything that can return JSON)
see library/slurp and others for examples
@@ -67,7 +67,7 @@ class AnsibleModule(object):
self._legal_inputs = []
self._handle_aliases()
# this may be disabled where modules are going to daisy chain into others
if check_invalid_arguments:
self._check_invalid_arguments()
@@ -151,7 +151,7 @@ class AnsibleModule(object):
for x in items:
(k, v) = x.split("=",1)
params[k] = v
return (params, args)
return (params, args)
def _log_invocation(self):
''' log that ansible ran the module '''
@@ -173,7 +173,7 @@ class AnsibleModule(object):
return False
else:
self.fail_json(msg='Boolean %s not in either boolean list' % arg)
def jsonify(self, data):
return json.dumps(data)

View File

@@ -28,12 +28,12 @@ SETUP_CACHE = collections.defaultdict(dict)
class PlayBook(object):
'''
runs an ansible playbook, given as a datastructure or YAML filename.
A playbook is a deployment, config management, or automation based
runs an ansible playbook, given as a datastructure or YAML filename.
A playbook is a deployment, config management, or automation based
set of commands to run in series.
multiple plays/tasks do not execute simultaneously, but tasks in each
pattern do execute in parallel (according to the number of forks
multiple plays/tasks do not execute simultaneously, but tasks in each
pattern do execute in parallel (according to the number of forks
requested) among the hosts they address
'''
@@ -86,7 +86,7 @@ class PlayBook(object):
extra_vars = {}
if only_tags is None:
only_tags = [ 'all' ]
self.module_path = module_path
self.forks = forks
self.timeout = timeout
@@ -107,7 +107,7 @@ class PlayBook(object):
self.only_tags = only_tags
self.inventory = ansible.inventory.Inventory(host_list)
if not self.inventory._is_script:
self.global_vars.update(self.inventory.get_group_variables('all'))
@@ -143,7 +143,7 @@ class PlayBook(object):
return accumulated_plays
# *****************************************************
def run(self):
''' run all patterns in the playbook '''
@@ -186,11 +186,11 @@ class PlayBook(object):
pattern=task.play.hosts, inventory=self.inventory, module_name=task.module_name,
module_args=task.module_args, forks=self.forks,
remote_pass=self.remote_pass, module_path=self.module_path,
timeout=self.timeout, remote_user=task.play.remote_user,
timeout=self.timeout, remote_user=task.play.remote_user,
remote_port=task.play.remote_port, module_vars=task.module_vars,
private_key_file=self.private_key_file,
setup_cache=self.SETUP_CACHE, basedir=self.basedir,
conditional=task.only_if, callbacks=self.runner_callbacks,
conditional=task.only_if, callbacks=self.runner_callbacks,
verbose=self.verbose, sudo=task.play.sudo, sudo_user=task.play.sudo_user,
transport=task.play.transport, sudo_pass=self.sudo_pass, is_playbook=True
)
@@ -226,7 +226,7 @@ class PlayBook(object):
for host, result in results['contacted'].iteritems():
facts = result.get('ansible_facts', {})
self.SETUP_CACHE[host].update(facts)
# flag which notify handlers need to be run
if len(task.notify) > 0:
for host, results in results.get('contacted',{}).iteritems():
@@ -237,7 +237,7 @@ class PlayBook(object):
# *****************************************************
def _flag_handler(self, handlers, handler_name, host):
'''
'''
if a task has any notify elements, flag handlers for run
at end of execution cycle for hosts that have indicated
changes have been made
@@ -256,8 +256,8 @@ class PlayBook(object):
def _do_setup_step(self, play):
''' get facts from the remote system '''
host_list = [ h for h in self.inventory.list_hosts(play.hosts)
host_list = [ h for h in self.inventory.list_hosts(play.hosts)
if not (h in self.stats.failures or h in self.stats.dark) ]
if not play.gather_facts:
@@ -271,7 +271,7 @@ class PlayBook(object):
pattern=play.hosts, module_name='setup', module_args={}, inventory=self.inventory,
forks=self.forks, module_path=self.module_path, timeout=self.timeout, remote_user=play.remote_user,
remote_pass=self.remote_pass, remote_port=play.remote_port, private_key_file=self.private_key_file,
setup_cache=self.SETUP_CACHE, callbacks=self.runner_callbacks, sudo=play.sudo, sudo_user=play.sudo_user,
setup_cache=self.SETUP_CACHE, callbacks=self.runner_callbacks, sudo=play.sudo, sudo_user=play.sudo_user,
verbose=self.verbose, transport=play.transport, sudo_pass=self.sudo_pass, is_playbook=True
).run()
self.stats.compute(setup_results, setup=True)
@@ -297,14 +297,14 @@ class PlayBook(object):
self.callbacks.on_play_start(play.name)
# get facts from system
rc = self._do_setup_step(play)
rc = self._do_setup_step(play)
# now with that data, handle conditional variable file imports!
if play.vars_files and len(play.vars_files) > 0:
play.update_vars_files(self.inventory.list_hosts(play.hosts))
for task in play.tasks():
# only run the task if the requested tags match
should_run = False
for x in self.only_tags:

View File

@@ -25,10 +25,10 @@ import os
class Play(object):
__slots__ = [
'hosts', 'name', 'vars', 'vars_prompt', 'vars_files',
__slots__ = [
'hosts', 'name', 'vars', 'vars_prompt', 'vars_files',
'handlers', 'remote_user', 'remote_port',
'sudo', 'sudo_user', 'transport', 'playbook',
'sudo', 'sudo_user', 'transport', 'playbook',
'tags', 'gather_facts', '_ds', '_handlers', '_tasks'
]
@@ -60,7 +60,7 @@ class Play(object):
self._ds = ds
self.playbook = playbook
self.hosts = hosts
self.hosts = hosts
self.name = ds.get('name', self.hosts)
self.vars = ds.get('vars', {})
self.vars_files = ds.get('vars_files', [])
@@ -126,7 +126,7 @@ class Play(object):
def tasks(self):
''' return task objects for this play '''
return self._tasks
return self._tasks
def handlers(self):
''' return handler objects for this play '''
@@ -146,7 +146,7 @@ class Play(object):
raise errors.AnsibleError("'vars' section must contain only key/value pairs")
vars = self.playbook.global_vars
# translate a list of vars into a dict
if type(self.vars) == list:
for item in self.vars:
@@ -178,7 +178,7 @@ class Play(object):
def update_vars_files(self, hosts):
''' calculate vars_files, which requires that setup runs first so ansible facts can be mixed in '''
# now loop through all the hosts...
for h in hosts:
self._update_vars_files_for_host(h)
@@ -196,11 +196,11 @@ class Play(object):
return True
if tags_counted > 0:
return False
return False
# didn't tag the play, and the play contains no steps
# so assume we just want to gather facts
return True
return True
# *************************************************
@@ -213,7 +213,7 @@ class Play(object):
if type(self.vars_files) != list:
self.vars_files = [ self.vars_files ]
if (host is not None):
inventory = self.playbook.inventory
hostrec = inventory.get_host(host)
@@ -288,8 +288,8 @@ class Play(object):
raise errors.AnsibleError("%s must be stored as dictonary/hash: %s" % filename4)
if host is not None and self._has_vars_in(filename2) and not self._has_vars_in(filename3):
# running a host specific pass and has host specific variables
# load into setup cache
# load into setup cache
self.playbook.SETUP_CACHE[host].update(new_vars)
elif host is None:
# running a non-host specific pass and we can update the global vars instead
# running a non-host specific pass and we can update the global vars instead
self.vars.update(new_vars)

View File

@@ -20,9 +20,9 @@ from ansible import utils
class Task(object):
__slots__ = [
__slots__ = [
'name', 'action', 'only_if', 'async_seconds', 'async_poll_interval',
'notify', 'module_name', 'module_args', 'module_vars',
'notify', 'module_name', 'module_args', 'module_vars',
'play', 'notified_by', 'tags', 'with_items', 'first_available_file', 'ignore_errors'
]
@@ -63,7 +63,7 @@ class Task(object):
self.first_available_file = ds.get('first_available_file', None)
self.with_items = ds.get('with_items', None)
self.ignore_errors = ds.get('ignore_errors', False)
# notify can be a string or a list, store as a list
if isinstance(self.notify, basestring):
self.notify = [ self.notify ]
@@ -92,8 +92,8 @@ class Task(object):
# make first_available_file accessible to Runner code
if self.first_available_file:
self.module_vars['first_available_file'] = self.first_available_file
# process with_items so it can be used by Runner code
# process with_items so it can be used by Runner code
if self.with_items is None:
self.with_items = [ ]
self.module_vars['items'] = self.with_items
@@ -109,4 +109,4 @@ class Task(object):
elif type(apply_tags) == list:
self.tags.extend(apply_tags)
self.tags.extend(import_tags)

View File

@@ -30,7 +30,7 @@ import codecs
import collections
import re
import ansible.constants as C
import ansible.constants as C
import ansible.inventory
from ansible import utils
from ansible import errors
@@ -38,7 +38,7 @@ from ansible import module_common
import poller
import connection
from ansible import callbacks as ans_callbacks
HAS_ATFORK=True
try:
from Crypto.Random import atfork
@@ -65,7 +65,7 @@ def _executor_hook(job_queue, result_queue):
pass
except:
traceback.print_exc()
################################################
class ReturnData(object):
@@ -103,7 +103,7 @@ class Runner(object):
# see bin/ansible for how this is used...
def __init__(self,
def __init__(self,
host_list=C.DEFAULT_HOST_LIST, # ex: /etc/ansible/hosts, legacy usage
module_path=C.DEFAULT_MODULE_PATH, # ex: /usr/share/ansible
module_name=C.DEFAULT_MODULE_NAME, # ex: copy
@@ -114,7 +114,7 @@ class Runner(object):
remote_user=C.DEFAULT_REMOTE_USER, # ex: 'username'
remote_pass=C.DEFAULT_REMOTE_PASS, # ex: 'password123' or None if using key
remote_port=C.DEFAULT_REMOTE_PORT, # if SSH on different ports
private_key_file=C.DEFAULT_PRIVATE_KEY_FILE, # if not using keys/passwords
private_key_file=C.DEFAULT_PRIVATE_KEY_FILE, # if not using keys/passwords
sudo_pass=C.DEFAULT_SUDO_PASS, # ex: 'password123' or None
background=0, # async poll every X seconds, else 0 for non-async
basedir=None, # directory of playbook, if applicable
@@ -125,7 +125,7 @@ class Runner(object):
verbose=False, # whether to show more or less
sudo=False, # whether to run sudo or not
sudo_user=C.DEFAULT_SUDO_USER, # ex: 'root'
module_vars=None, # a playbooks internals thing
module_vars=None, # a playbooks internals thing
is_playbook=False, # running from playbook or not?
inventory=None # reference to Inventory object
):
@@ -162,7 +162,7 @@ class Runner(object):
raise errors.AnsibleError("SSH transport does not support passwords, only keys or agents")
if self.transport == 'local':
self.remote_user = pwd.getpwuid(os.geteuid())[0]
# ensure we are using unique tmp paths
random.seed()
@@ -201,7 +201,7 @@ class Runner(object):
# *****************************************************
def _execute_module(self, conn, tmp, module_name, args,
def _execute_module(self, conn, tmp, module_name, args,
async_jid=None, async_module=None, async_limit=None, inject=None):
''' runs a module that has already been transferred '''
@@ -270,7 +270,7 @@ class Runner(object):
return self._execute_module(conn, tmp, 'async_wrapper', module_args,
async_module=module_path,
async_jid=self.generated_jid,
async_jid=self.generated_jid,
async_limit=self.background,
inject=inject
)
@@ -301,7 +301,7 @@ class Runner(object):
if not found:
results=dict(failed=True, msg="could not find src in first_available_file list")
return ReturnData(host=conn.host, results=results)
source = utils.template(source, inject)
source = utils.path_dwim(self.basedir, source)
@@ -309,10 +309,10 @@ class Runner(object):
if local_md5 is None:
result=dict(failed=True, msg="could not find src=%s" % source)
return ReturnData(host=conn.host, result=result)
remote_md5 = self._remote_md5(conn, tmp, dest)
exec_rc = None
remote_md5 = self._remote_md5(conn, tmp, dest)
exec_rc = None
if local_md5 != remote_md5:
# transfer the file to a remote tmp location
tmp_src = tmp + source.split('/')[-1]
@@ -344,7 +344,7 @@ class Runner(object):
source = utils.template(source, inject)
# apply templating to dest argument
dest = utils.template(dest, inject)
# files are saved in dest dir, with a subdir for each host, then the filename
dest = "%s/%s/%s" % (utils.path_dwim(self.basedir, dest), conn.host, source)
dest = dest.replace("//","/")
@@ -383,7 +383,7 @@ class Runner(object):
else:
result = dict(changed=False, md5sum=local_md5, file=source)
return ReturnData(host=conn.host, result=result)
# *****************************************************
def _execute_template(self, conn, tmp, inject=None):
@@ -423,7 +423,7 @@ class Runner(object):
result = dict(failed=True, msg=str(e))
return ReturnData(host=conn.host, comm_ok=False, result=result)
xfered = self._transfer_str(conn, tmp, 'source', resultant)
# run the copy module, queue the file module
self.module_args = "%s src=%s dest=%s" % (self.module_args, xfered, dest)
return self._execute_module(conn, tmp, 'copy', self.module_args, inject=inject).daisychain('file')
@@ -435,7 +435,7 @@ class Runner(object):
# FIXME: once assemble is ported over to use the new common logic, this method
# will be unneccessary as it can decide to daisychain via it's own module returns.
# and this function can be deleted.
# and this function can be deleted.
return self._execute_module(conn, tmp, 'assemble', self.module_args, inject=inject).daisychain('file')
@@ -489,7 +489,7 @@ class Runner(object):
# hack for apt and soon yum, with_items maps back into a single module call
inject['item'] = ",".join(items)
items = []
if len(items) == 0:
return self._executor_internal_inner(host, inject, port)
else:
@@ -589,7 +589,7 @@ class Runner(object):
changed = True
result.result.update(result2.result)
result.result['changed'] = changed
del result.result['daisychain']
self._delete_remote_files(conn, tmp)
@@ -631,7 +631,7 @@ class Runner(object):
out = "\n".join(stdout.readlines())
else:
out = stdout
if type(stderr) != str:
err = "\n".join(stderr.readlines())
else:
@@ -642,15 +642,15 @@ class Runner(object):
# *****************************************************
def _remote_md5(self, conn, tmp, path):
''' takes a remote md5sum without requiring python, and returns 0 if no file '''
''' takes a remote md5sum without requiring python, and returns 0 if no file '''
test = "rc=0; [[ -r \"%s\" ]] || rc=2; [[ -f \"%s\" ]] || rc=1" % (path,path)
md5s = [
"(/usr/bin/md5sum %s 2>/dev/null)" % path,
"(/sbin/md5sum -q %s 2>/dev/null)" % path,
"(/usr/bin/digest -a md5 -v %s 2>/dev/null)" % path
]
cmd = " || ".join(md5s)
cmd = "%s; %s || (echo \"${rc} %s\")" % (test, cmd, path)
return self._low_level_exec_command(conn, cmd, tmp, sudoable=False).split()[0]
@@ -702,7 +702,7 @@ class Runner(object):
module_data = module_data.replace(module_common.REPLACER, module_common.MODULE_COMMON)
encoded_args = "\"\"\"%s\"\"\"" % utils.template(self.module_args, inject).replace("\"","\\\"")
module_data = module_data.replace(module_common.REPLACER_ARGS, encoded_args)
# use the correct python interpreter for the host
if 'ansible_python_interpreter' in inject:
interpreter = inject['ansible_python_interpreter']
@@ -771,13 +771,13 @@ class Runner(object):
def run(self):
''' xfer & run module on all matched hosts '''
# find hosts that match the pattern
hosts = self.inventory.list_hosts(self.pattern)
if len(hosts) == 0:
self.callbacks.on_no_hosts()
return dict(contacted={}, dark={})
hosts = [ (self,x) for x in hosts ]
results = None
if self.forks > 1:

View File

@@ -80,17 +80,17 @@ class ParamikoConnection(object):
bufsize = 4096
chan = self.ssh.get_transport().open_session()
chan.get_pty()
chan.get_pty()
if not self.runner.sudo or not sudoable:
quoted_command = '"$SHELL" -c ' + pipes.quote(cmd)
quoted_command = '"$SHELL" -c ' + pipes.quote(cmd)
chan.exec_command(quoted_command)
else:
# Rather than detect if sudo wants a password this time, -k makes
# Rather than detect if sudo wants a password this time, -k makes
# sudo always ask for a password if one is required. The "--"
# tells sudo that this is the end of sudo options and the command
# follows. Passing a quoted compound command to sudo (or sudo -s)
# directly doesn't work, so we shellquote it with pipes.quote()
# directly doesn't work, so we shellquote it with pipes.quote()
# and pass the quoted string to the user's shell. We loop reading
# output until we see the randomly-generated sudo prompt set with
# the -p option.

View File

@@ -58,11 +58,11 @@ class SSHConnection(object):
ssh_cmd = ["ssh", "-tt", "-q"] + self.common_args + [self.host]
if self.runner.sudo and sudoable:
# Rather than detect if sudo wants a password this time, -k makes
# Rather than detect if sudo wants a password this time, -k makes
# sudo always ask for a password if one is required. The "--"
# tells sudo that this is the end of sudo options and the command
# follows. Passing a quoted compound command to sudo (or sudo -s)
# directly doesn't work, so we shellquote it with pipes.quote()
# directly doesn't work, so we shellquote it with pipes.quote()
# and pass the quoted string to the user's shell. We loop reading
# output until we see the randomly-generated sudo prompt set with
# the -p option.
@@ -104,12 +104,12 @@ class SSHConnection(object):
stdout += os.read(p.stdout.fileno(), 1024)
# older versions of ssh generate this error which we ignore
stdout=stdout.replace("tcgetattr: Invalid argument\n", "")
# suppress Ubuntu 10.04/12.04 error on -tt option
# suppress Ubuntu 10.04/12.04 error on -tt option
stdout=stdout.replace("tcgetattr: Inappropriate ioctl for device\n","")
if p.returncode != 0 and stdout.find('Bad configuration option: ControlPersist') != -1:
raise errors.AnsibleError('using -c ssh on certain older ssh versions may not support ControlPersist, set ANSIBLE_SSH_ARGS="" before running again')
return ('', stdout, '')
def put_file(self, in_path, out_path):

View File

@@ -36,7 +36,7 @@ except ImportError:
try:
from hashlib import md5 as _md5
except ImportError:
except ImportError:
from md5 import md5 as _md5
###############################################################
@@ -117,10 +117,10 @@ def parse_json(data):
results = {}
try:
tokens = shlex.split(data)
except:
except:
print "failed to parse json: "+ data
raise
raise
for t in tokens:
if t.find("=") == -1:
raise errors.AnsibleError("failed to parse: %s" % data)
@@ -131,7 +131,7 @@ def parse_json(data):
elif value.lower() in [ 'false', '0' ]:
value = False
if key == 'rc':
value = int(value)
value = int(value)
results[key] = value
if len(results.keys()) == 0:
return { "failed" : True, "parsed" : False, "msg" : data }
@@ -204,7 +204,7 @@ def template(text, vars):
if (depth > 20):
raise errors.AnsibleError("template recursion depth exceeded")
prev_text = text
text = varReplace(unicode(text), vars)
text = varReplace(unicode(text), vars)
return text
def template_from_file(basedir, path, vars):
@@ -223,7 +223,7 @@ def parse_yaml(data):
''' convert a yaml string to a data structure '''
return yaml.load(data)
def parse_yaml_from_file(path):
''' convert a yaml file to a data structure '''
@@ -268,7 +268,7 @@ def default(value, function):
return value
####################################################################
# option handling code for /usr/bin/ansible and ansible-playbook
# option handling code for /usr/bin/ansible and ansible-playbook
# below this line
class SortedOptParser(optparse.OptionParser):
@@ -287,7 +287,7 @@ def base_parser(constants=C, usage="", output_opts=False, runas_opts=False, asyn
parser.add_option('-f','--forks', dest='forks', default=constants.DEFAULT_FORKS, type='int',
help="specify number of parallel processes to use (default=%s)" % constants.DEFAULT_FORKS)
parser.add_option('-i', '--inventory-file', dest='inventory',
help="specify inventory host file (default=%s)" % constants.DEFAULT_HOST_LIST,
help="specify inventory host file (default=%s)" % constants.DEFAULT_HOST_LIST,
default=constants.DEFAULT_HOST_LIST)
parser.add_option('-k', '--ask-pass', default=False, dest='ask_pass', action='store_true',
help='ask for SSH password')
@@ -299,7 +299,7 @@ def base_parser(constants=C, usage="", output_opts=False, runas_opts=False, asyn
help="specify path(s) to module library (default=%s)" % constants.DEFAULT_MODULE_PATH,
default=constants.DEFAULT_MODULE_PATH)
parser.add_option('-T', '--timeout', default=constants.DEFAULT_TIMEOUT, type='int',
dest='timeout',
dest='timeout',
help="override the SSH timeout in seconds (default=%s)" % constants.DEFAULT_TIMEOUT)
if output_opts:
@@ -314,9 +314,9 @@ def base_parser(constants=C, usage="", output_opts=False, runas_opts=False, asyn
parser.add_option('-U', '--sudo-user', dest='sudo_user', help='desired sudo user (default=root)',
default=None) # Can't default to root because we need to detect when this option was given
parser.add_option('-u', '--user', default=constants.DEFAULT_REMOTE_USER,
dest='remote_user',
dest='remote_user',
help='connect as this user (default=%s)' % constants.DEFAULT_REMOTE_USER)
if connect_opts:
parser.add_option('-c', '--connection', dest='connection',
choices=C.DEFAULT_TRANSPORT_OPTS,
@@ -325,7 +325,7 @@ def base_parser(constants=C, usage="", output_opts=False, runas_opts=False, asyn
if async_opts:
parser.add_option('-P', '--poll', default=constants.DEFAULT_POLL_INTERVAL, type='int',
dest='poll_interval',
dest='poll_interval',
help="set the poll interval if using -B (default=%s)" % constants.DEFAULT_POLL_INTERVAL)
parser.add_option('-B', '--background', dest='seconds', type='int', default=0,
help='run asynchronously, failing after X seconds (default=N/A)')