Diffstat (limited to 'lib/ansible')
-rw-r--r--  lib/ansible/__init__.py | 11
-rw-r--r--  lib/ansible/_vendor/__init__.py | 2
-rw-r--r--  lib/ansible/cli/__init__.py | 3
-rw-r--r--  lib/ansible/cli/adhoc.py | 22
-rw-r--r--  lib/ansible/cli/arguments/option_helpers.py | 78
-rw-r--r--  lib/ansible/cli/config.py | 4
-rw-r--r--  lib/ansible/cli/console.py | 84
-rw-r--r--  lib/ansible/cli/doc.py | 685
-rw-r--r--  lib/ansible/cli/galaxy.py | 351
-rw-r--r--  lib/ansible/cli/inventory.py | 13
-rw-r--r--  lib/ansible/cli/playbook.py | 60
-rw-r--r--  lib/ansible/cli/pull.py | 22
-rwxr-xr-x  lib/ansible/cli/scripts/ansible_cli_stub.py | 16
-rwxr-xr-x  lib/ansible/cli/scripts/ansible_connection_cli_stub.py | 2
-rw-r--r--  lib/ansible/cli/vault.py | 11
-rw-r--r--  lib/ansible/collections/list.py | 47
-rw-r--r--  lib/ansible/compat/selectors/__init__.py | 2
-rw-r--r--  lib/ansible/config/ansible_builtin_runtime.yml | 338
-rw-r--r--  lib/ansible/config/base.yml | 323
-rw-r--r--  lib/ansible/config/manager.py | 37
-rw-r--r--  lib/ansible/constants.py | 100
-rw-r--r--  lib/ansible/errors/__init__.py | 38
-rw-r--r--  lib/ansible/executor/module_common.py | 187
-rw-r--r--  lib/ansible/executor/play_iterator.py | 15
-rw-r--r--  lib/ansible/executor/playbook_executor.py | 22
-rw-r--r--  lib/ansible/executor/powershell/coverage_wrapper.ps1 | 8
-rw-r--r--  lib/ansible/executor/powershell/module_manifest.py | 4
-rw-r--r--  lib/ansible/executor/process/worker.py | 27
-rw-r--r--  lib/ansible/executor/task_executor.py | 193
-rw-r--r--  lib/ansible/executor/task_queue_manager.py | 84
-rw-r--r--  lib/ansible/galaxy/api.py | 322
-rw-r--r--  lib/ansible/galaxy/collection.py | 1551
-rw-r--r--  lib/ansible/galaxy/collection/__init__.py | 1382
-rw-r--r--  lib/ansible/galaxy/collection/concrete_artifact_manager.py | 654
-rw-r--r--  lib/ansible/galaxy/collection/galaxy_api_proxy.py | 107
-rw-r--r--  lib/ansible/galaxy/dependency_resolution/__init__.py | 56
-rw-r--r--  lib/ansible/galaxy/dependency_resolution/dataclasses.py | 427
-rw-r--r--  lib/ansible/galaxy/dependency_resolution/errors.py | 12
-rw-r--r--  lib/ansible/galaxy/dependency_resolution/providers.py | 332
-rw-r--r--  lib/ansible/galaxy/dependency_resolution/reporters.py | 17
-rw-r--r--  lib/ansible/galaxy/dependency_resolution/resolvers.py | 17
-rw-r--r--  lib/ansible/galaxy/dependency_resolution/versioning.py | 70
-rw-r--r--  lib/ansible/galaxy/role.py | 6
-rw-r--r--  lib/ansible/galaxy/token.py | 6
-rw-r--r--  lib/ansible/inventory/data.py | 3
-rw-r--r--  lib/ansible/inventory/group.py | 1
-rw-r--r--  lib/ansible/inventory/manager.py | 13
-rw-r--r--  lib/ansible/keyword_desc.yml | 79
-rw-r--r--  lib/ansible/module_utils/api.py | 50
-rw-r--r--  lib/ansible/module_utils/basic.py | 923
-rw-r--r--  lib/ansible/module_utils/common/arg_spec.py | 286
-rw-r--r--  lib/ansible/module_utils/common/parameters.py | 890
-rw-r--r--  lib/ansible/module_utils/common/process.py | 2
-rw-r--r--  lib/ansible/module_utils/common/respawn.py | 98
-rw-r--r--  lib/ansible/module_utils/common/validation.py | 227
-rw-r--r--  lib/ansible/module_utils/compat/selinux.py | 113
-rw-r--r--  lib/ansible/module_utils/errors.py | 119
-rw-r--r--  lib/ansible/module_utils/facts/ansible_collector.py | 10
-rw-r--r--  lib/ansible/module_utils/facts/hardware/linux.py | 9
-rw-r--r--  lib/ansible/module_utils/facts/hardware/openbsd.py | 46
-rw-r--r--  lib/ansible/module_utils/facts/network/hpux.py | 2
-rw-r--r--  lib/ansible/module_utils/facts/sysctl.py | 42
-rw-r--r--  lib/ansible/module_utils/facts/system/date_time.py | 1
-rw-r--r--  lib/ansible/module_utils/facts/system/distribution.py | 108
-rw-r--r--  lib/ansible/module_utils/facts/system/local.py | 50
-rw-r--r--  lib/ansible/module_utils/facts/system/pkg_mgr.py | 6
-rw-r--r--  lib/ansible/module_utils/facts/system/selinux.py | 2
-rw-r--r--  lib/ansible/module_utils/facts/system/user.py | 5
-rw-r--r--  lib/ansible/module_utils/facts/timeout.py | 2
-rw-r--r--  lib/ansible/module_utils/facts/utils.py | 41
-rw-r--r--  lib/ansible/module_utils/facts/virtual/base.py | 16
-rw-r--r--  lib/ansible/module_utils/facts/virtual/freebsd.py | 30
-rw-r--r--  lib/ansible/module_utils/facts/virtual/hpux.py | 10
-rw-r--r--  lib/ansible/module_utils/facts/virtual/linux.py | 315
-rw-r--r--  lib/ansible/module_utils/facts/virtual/netbsd.py | 29
-rw-r--r--  lib/ansible/module_utils/facts/virtual/openbsd.py | 12
-rw-r--r--  lib/ansible/module_utils/facts/virtual/sunos.py | 27
-rw-r--r--  lib/ansible/module_utils/facts/virtual/sysctl.py | 83
-rw-r--r--  lib/ansible/module_utils/json_utils.py | 4
-rw-r--r--  lib/ansible/module_utils/six/__init__.py | 20
-rw-r--r--  lib/ansible/module_utils/urls.py | 166
-rw-r--r--  lib/ansible/module_utils/yumdnf.py | 10
-rw-r--r--  lib/ansible/modules/add_host.py | 1
-rw-r--r--  lib/ansible/modules/apt.py | 162
-rw-r--r--  lib/ansible/modules/apt_key.py | 281
-rw-r--r--  lib/ansible/modules/apt_repository.py | 114
-rw-r--r--  lib/ansible/modules/async_wrapper.py | 8
-rw-r--r--  lib/ansible/modules/blockinfile.py | 9
-rw-r--r--  lib/ansible/modules/command.py | 139
-rw-r--r--  lib/ansible/modules/copy.py | 13
-rw-r--r--  lib/ansible/modules/cron.py | 28
-rw-r--r--  lib/ansible/modules/dnf.py | 179
-rw-r--r--  lib/ansible/modules/dpkg_selections.py | 6
-rw-r--r--  lib/ansible/modules/expect.py | 35
-rw-r--r--  lib/ansible/modules/file.py | 50
-rw-r--r--  lib/ansible/modules/find.py | 64
-rw-r--r--  lib/ansible/modules/gather_facts.py | 4
-rw-r--r--  lib/ansible/modules/get_url.py | 49
-rw-r--r--  lib/ansible/modules/getent.py | 6
-rw-r--r--  lib/ansible/modules/git.py | 82
-rw-r--r--  lib/ansible/modules/group_by.py | 3
-rw-r--r--  lib/ansible/modules/hostname.py | 207
-rw-r--r--  lib/ansible/modules/import_role.py | 9
-rw-r--r--  lib/ansible/modules/include_role.py | 15
-rw-r--r--  lib/ansible/modules/iptables.py | 72
-rw-r--r--  lib/ansible/modules/known_hosts.py | 4
-rw-r--r--  lib/ansible/modules/lineinfile.py | 83
-rw-r--r--  lib/ansible/modules/package.py | 14
-rw-r--r--  lib/ansible/modules/package_facts.py | 55
-rw-r--r--  lib/ansible/modules/pip.py | 1
-rw-r--r--  lib/ansible/modules/reboot.py | 18
-rw-r--r--  lib/ansible/modules/replace.py | 10
-rw-r--r--  lib/ansible/modules/rpm_key.py | 4
-rw-r--r--  lib/ansible/modules/service.py | 13
-rw-r--r--  lib/ansible/modules/service_facts.py | 84
-rw-r--r--  lib/ansible/modules/set_fact.py | 53
-rw-r--r--  lib/ansible/modules/set_stats.py | 12
-rw-r--r--  lib/ansible/modules/setup.py | 60
-rw-r--r--  lib/ansible/modules/slurp.py | 40
-rw-r--r--  lib/ansible/modules/stat.py | 8
-rw-r--r--  lib/ansible/modules/subversion.py | 60
-rw-r--r--  lib/ansible/modules/systemd.py | 62
-rw-r--r--  lib/ansible/modules/sysvinit.py | 9
-rw-r--r--  lib/ansible/modules/unarchive.py | 193
-rw-r--r--  lib/ansible/modules/uri.py | 33
-rw-r--r--  lib/ansible/modules/user.py | 80
-rw-r--r--  lib/ansible/modules/validate_argument_spec.py | 96
-rw-r--r--  lib/ansible/modules/wait_for.py | 63
-rw-r--r--  lib/ansible/modules/yum.py | 30
-rw-r--r--  lib/ansible/modules/yum_repository.py | 67
-rw-r--r--  lib/ansible/parsing/dataloader.py | 38
-rw-r--r--  lib/ansible/parsing/mod_args.py | 4
-rw-r--r--  lib/ansible/parsing/splitter.py | 5
-rw-r--r--  lib/ansible/parsing/utils/yaml.py | 9
-rw-r--r--  lib/ansible/parsing/vault/__init__.py | 18
-rw-r--r--  lib/ansible/parsing/yaml/constructor.py | 17
-rw-r--r--  lib/ansible/parsing/yaml/objects.py | 2
-rw-r--r--  lib/ansible/playbook/base.py | 7
-rw-r--r--  lib/ansible/playbook/block.py | 11
-rw-r--r--  lib/ansible/playbook/conditional.py | 57
-rw-r--r--  lib/ansible/playbook/helpers.py | 2
-rw-r--r--  lib/ansible/playbook/included_file.py | 102
-rw-r--r--  lib/ansible/playbook/play.py | 3
-rw-r--r--  lib/ansible/playbook/play_context.py | 17
-rw-r--r--  lib/ansible/playbook/playbook_include.py | 30
-rw-r--r--  lib/ansible/playbook/role/__init__.py | 123
-rw-r--r--  lib/ansible/playbook/role/metadata.py | 1
-rw-r--r--  lib/ansible/playbook/role/requirement.py | 4
-rw-r--r--  lib/ansible/playbook/role_include.py | 5
-rw-r--r--  lib/ansible/playbook/task.py | 9
-rw-r--r--  lib/ansible/playbook/task_include.py | 2
-rw-r--r--  lib/ansible/plugins/action/__init__.py | 256
-rw-r--r--  lib/ansible/plugins/action/command.py | 2
-rw-r--r--  lib/ansible/plugins/action/gather_facts.py | 12
-rw-r--r--  lib/ansible/plugins/action/package.py | 7
-rw-r--r--  lib/ansible/plugins/action/pause.py | 40
-rw-r--r--  lib/ansible/plugins/action/reboot.py | 111
-rw-r--r--  lib/ansible/plugins/action/service.py | 3
-rw-r--r--  lib/ansible/plugins/action/set_fact.py | 25
-rw-r--r--  lib/ansible/plugins/action/template.py | 25
-rw-r--r--  lib/ansible/plugins/action/validate_argument_spec.py | 94
-rw-r--r--  lib/ansible/plugins/become/runas.py | 2
-rw-r--r--  lib/ansible/plugins/become/su.py | 3
-rw-r--r--  lib/ansible/plugins/become/sudo.py | 2
-rw-r--r--  lib/ansible/plugins/cache/__init__.py | 3
-rw-r--r--  lib/ansible/plugins/cache/jsonfile.py | 3
-rw-r--r--  lib/ansible/plugins/cache/memory.py | 2
-rw-r--r--  lib/ansible/plugins/callback/__init__.py | 13
-rw-r--r--  lib/ansible/plugins/callback/default.py | 61
-rw-r--r--  lib/ansible/plugins/callback/junit.py | 4
-rw-r--r--  lib/ansible/plugins/callback/minimal.py | 2
-rw-r--r--  lib/ansible/plugins/callback/oneline.py | 2
-rw-r--r--  lib/ansible/plugins/callback/tree.py | 43
-rw-r--r--  lib/ansible/plugins/connection/local.py | 13
-rw-r--r--  lib/ansible/plugins/connection/paramiko_ssh.py | 4
-rw-r--r--  lib/ansible/plugins/connection/psrp.py | 51
-rw-r--r--  lib/ansible/plugins/connection/ssh.py | 201
-rw-r--r--  lib/ansible/plugins/connection/winrm.py | 32
-rw-r--r--  lib/ansible/plugins/doc_fragments/action_common_attributes.py | 18
-rw-r--r--  lib/ansible/plugins/doc_fragments/connection_pipelining.py | 29
-rw-r--r--  lib/ansible/plugins/doc_fragments/constructed.py | 21
-rw-r--r--  lib/ansible/plugins/doc_fragments/default_callback.py | 13
-rw-r--r--  lib/ansible/plugins/doc_fragments/files.py | 5
-rw-r--r--  lib/ansible/plugins/doc_fragments/shell_common.py | 27
-rw-r--r--  lib/ansible/plugins/doc_fragments/shell_windows.py | 9
-rw-r--r--  lib/ansible/plugins/doc_fragments/url.py | 11
-rw-r--r--  lib/ansible/plugins/filter/core.py | 30
-rw-r--r--  lib/ansible/plugins/filter/mathstuff.py | 50
-rw-r--r--  lib/ansible/plugins/inventory/__init__.py | 41
-rw-r--r--  lib/ansible/plugins/inventory/advanced_host_list.py | 2
-rw-r--r--  lib/ansible/plugins/inventory/auto.py | 1
-rw-r--r--  lib/ansible/plugins/inventory/constructed.py | 54
-rw-r--r--  lib/ansible/plugins/inventory/generator.py | 1
-rw-r--r--  lib/ansible/plugins/inventory/host_list.py | 2
-rw-r--r--  lib/ansible/plugins/inventory/ini.py | 66
-rw-r--r--  lib/ansible/plugins/inventory/script.py | 2
-rw-r--r--  lib/ansible/plugins/inventory/toml.py | 114
-rw-r--r--  lib/ansible/plugins/inventory/yaml.py | 2
-rw-r--r--  lib/ansible/plugins/loader.py | 94
-rw-r--r--  lib/ansible/plugins/lookup/config.py | 2
-rw-r--r--  lib/ansible/plugins/lookup/csvfile.py | 27
-rw-r--r--  lib/ansible/plugins/lookup/dict.py | 2
-rw-r--r--  lib/ansible/plugins/lookup/env.py | 2
-rw-r--r--  lib/ansible/plugins/lookup/file.py | 2
-rw-r--r--  lib/ansible/plugins/lookup/fileglob.py | 7
-rw-r--r--  lib/ansible/plugins/lookup/first_found.py | 55
-rw-r--r--  lib/ansible/plugins/lookup/indexed_items.py | 2
-rw-r--r--  lib/ansible/plugins/lookup/ini.py | 23
-rw-r--r--  lib/ansible/plugins/lookup/inventory_hostnames.py | 47
-rw-r--r--  lib/ansible/plugins/lookup/items.py | 2
-rw-r--r--  lib/ansible/plugins/lookup/lines.py | 2
-rw-r--r--  lib/ansible/plugins/lookup/list.py | 2
-rw-r--r--  lib/ansible/plugins/lookup/nested.py | 2
-rw-r--r--  lib/ansible/plugins/lookup/password.py | 34
-rw-r--r--  lib/ansible/plugins/lookup/pipe.py | 2
-rw-r--r--  lib/ansible/plugins/lookup/random_choice.py | 2
-rw-r--r--  lib/ansible/plugins/lookup/sequence.py | 7
-rw-r--r--  lib/ansible/plugins/lookup/subelements.py | 2
-rw-r--r--  lib/ansible/plugins/lookup/template.py | 47
-rw-r--r--  lib/ansible/plugins/lookup/together.py | 2
-rw-r--r--  lib/ansible/plugins/lookup/unvault.py | 2
-rw-r--r--  lib/ansible/plugins/lookup/url.py | 9
-rw-r--r--  lib/ansible/plugins/lookup/varnames.py | 4
-rw-r--r--  lib/ansible/plugins/lookup/vars.py | 2
-rw-r--r--  lib/ansible/plugins/netconf/__init__.py | 3
-rw-r--r--  lib/ansible/plugins/shell/__init__.py | 7
-rw-r--r--  lib/ansible/plugins/shell/cmd.py | 1
-rw-r--r--  lib/ansible/plugins/shell/powershell.py | 1
-rw-r--r--  lib/ansible/plugins/shell/sh.py | 1
-rw-r--r--  lib/ansible/plugins/strategy/__init__.py | 123
-rw-r--r--  lib/ansible/plugins/strategy/debug.py | 2
-rw-r--r--  lib/ansible/plugins/strategy/free.py | 7
-rw-r--r--  lib/ansible/plugins/strategy/host_pinned.py | 2
-rw-r--r--  lib/ansible/plugins/strategy/linear.py | 14
-rw-r--r--  lib/ansible/plugins/test/core.py | 39
-rw-r--r--  lib/ansible/plugins/vars/host_group_vars.py | 2
-rw-r--r--  lib/ansible/release.py | 4
-rw-r--r--  lib/ansible/template/__init__.py | 322
-rw-r--r--  lib/ansible/template/native_helpers.py | 5
-rw-r--r--  lib/ansible/template/safe_eval.py | 6
-rw-r--r--  lib/ansible/template/template.py | 16
-rw-r--r--  lib/ansible/utils/collection_loader/_collection_config.py | 2
-rw-r--r--  lib/ansible/utils/collection_loader/_collection_finder.py | 150
-rw-r--r--  lib/ansible/utils/color.py | 17
-rw-r--r--  lib/ansible/utils/display.py | 110
-rw-r--r--  lib/ansible/utils/encrypt.py | 75
-rw-r--r--  lib/ansible/utils/listify.py | 1
-rw-r--r--  lib/ansible/utils/lock.py | 43
-rw-r--r--  lib/ansible/utils/native_jinja.py | 13
-rw-r--r--  lib/ansible/utils/plugin_docs.py | 2
-rw-r--r--  lib/ansible/utils/ssh_functions.py | 3
-rw-r--r--  lib/ansible/utils/unsafe_proxy.py | 7
-rw-r--r--  lib/ansible/vars/manager.py | 57
-rw-r--r--  lib/ansible/vars/reserved.py | 9
254 files changed, 12193 insertions, 5501 deletions
diff --git a/lib/ansible/__init__.py b/lib/ansible/__init__.py
index e4905a18..f6334283 100644
--- a/lib/ansible/__init__.py
+++ b/lib/ansible/__init__.py
@@ -22,6 +22,17 @@ __metaclass__ = type
# make vendored top-level modules accessible EARLY
import ansible._vendor
+# patch Jinja2 >= 3.0 for backwards compatibility
+try:
+ import sys as _sys
+ from jinja2.filters import pass_context as _passctx, pass_environment as _passenv, pass_eval_context as _passevalctx
+ _mod = _sys.modules['jinja2.filters']
+ _mod.contextfilter = _passctx
+ _mod.environmentfilter = _passenv
+ _mod.evalcontextfilter = _passevalctx
+except ImportError:
+ _sys = None
+
# Note: Do not add any code to this file. The ansible module may be
# a namespace package when using Ansible-2.1+ Anything in this file may not be
# available if one of the other packages in the namespace is loaded first.
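
Note on the shim above: Jinja2 3.0 removed the contextfilter, environmentfilter and evalcontextfilter decorators in favour of pass_context, pass_environment and pass_eval_context, so re-attaching the old names to the jinja2.filters module keeps plugins written against Jinja2 2.x importable. A minimal sketch of the effect (the filter below is hypothetical, not part of this patch):

    import ansible  # importing ansible applies the compatibility aliases patched in above
    from jinja2.filters import contextfilter  # resolves to pass_context on Jinja2 >= 3.0

    @contextfilter
    def to_upper(context, value):
        # 'context' is the Jinja2 template context injected by the decorator
        return str(value).upper()
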
diff --git a/lib/ansible/_vendor/__init__.py b/lib/ansible/_vendor/__init__.py
index e6a4c56d..a31957b6 100644
--- a/lib/ansible/_vendor/__init__.py
+++ b/lib/ansible/_vendor/__init__.py
@@ -39,7 +39,7 @@ def _ensure_vendored_path_entry():
already_loaded_vendored_modules = set(sys.modules.keys()).intersection(vendored_module_names)
if already_loaded_vendored_modules:
- warnings.warn('One or more Python packages bundled by this ansible-base distribution were already '
+ warnings.warn('One or more Python packages bundled by this ansible-core distribution were already '
'loaded ({0}). This may result in undefined behavior.'.format(', '.join(sorted(already_loaded_vendored_modules))))
diff --git a/lib/ansible/cli/__init__.py b/lib/ansible/cli/__init__.py
index 86a7e521..112d892d 100644
--- a/lib/ansible/cli/__init__.py
+++ b/lib/ansible/cli/__init__.py
@@ -311,6 +311,9 @@ class CLI(with_metaclass(ABCMeta, object)):
# process tags
if hasattr(options, 'tags') and not options.tags:
# optparse defaults does not do what's expected
+ # More specifically, we want `--tags` to be additive. So we cannot
+ # simply change C.TAGS_RUN's default to ["all"] because then passing
+ # --tags foo would cause us to have ['all', 'foo']
options.tags = ['all']
if hasattr(options, 'tags') and options.tags:
tags = set()
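
As a side note on the comment above, a small illustration (not part of the patch) of why C.TAGS_RUN cannot simply default to ['all']: argparse's append action extends the default list in place, so a user-supplied tag would be added to the default rather than replace it.

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('-t', '--tags', action='append', default=['all'])
    print(parser.parse_args(['--tags', 'foo']).tags)  # -> ['all', 'foo'], not ['foo']
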
diff --git a/lib/ansible/cli/adhoc.py b/lib/ansible/cli/adhoc.py
index fd39f1a5..4ce876a6 100644
--- a/lib/ansible/cli/adhoc.py
+++ b/lib/ansible/cli/adhoc.py
@@ -28,10 +28,8 @@ class AdHocCLI(CLI):
def init_parser(self):
''' create an options parser for bin/ansible '''
super(AdHocCLI, self).init_parser(usage='%prog <host-pattern> [options]',
- desc="Define and run a single task 'playbook' against"
- " a set of hosts",
- epilog="Some modules do not make sense in Ad-Hoc (include,"
- " meta, etc)")
+ desc="Define and run a single task 'playbook' against a set of hosts",
+ epilog="Some actions do not make sense in Ad-Hoc (include, meta, etc)")
opt_help.add_runas_options(self.parser)
opt_help.add_inventory_options(self.parser)
@@ -44,12 +42,14 @@ class AdHocCLI(CLI):
opt_help.add_fork_options(self.parser)
opt_help.add_module_options(self.parser)
opt_help.add_basedir_options(self.parser)
+ opt_help.add_tasknoplay_options(self.parser)
# options unique to ansible ad-hoc
self.parser.add_argument('-a', '--args', dest='module_args',
- help="module arguments", default=C.DEFAULT_MODULE_ARGS)
+ help="The action's options in space separated k=v format: -a 'opt1=val1 opt2=val2'",
+ default=C.DEFAULT_MODULE_ARGS)
self.parser.add_argument('-m', '--module-name', dest='module_name',
- help="module name to execute (default=%s)" % C.DEFAULT_MODULE_NAME,
+ help="Name of the action to execute (default=%s)" % C.DEFAULT_MODULE_NAME,
default=C.DEFAULT_MODULE_NAME)
self.parser.add_argument('args', metavar='pattern', help='host pattern')
@@ -66,7 +66,8 @@ class AdHocCLI(CLI):
def _play_ds(self, pattern, async_val, poll):
check_raw = context.CLIARGS['module_name'] in C.MODULE_REQUIRE_ARGS
- mytask = {'action': {'module': context.CLIARGS['module_name'], 'args': parse_kv(context.CLIARGS['module_args'], check_raw=check_raw)}}
+ mytask = {'action': {'module': context.CLIARGS['module_name'], 'args': parse_kv(context.CLIARGS['module_args'], check_raw=check_raw)},
+ 'timeout': context.CLIARGS['task_timeout']}
# avoid adding to tasks that don't support it, unless set, then give user an error
if context.CLIARGS['module_name'] not in C._ACTION_ALL_INCLUDE_ROLE_TASKS and any(frozenset((async_val, poll))):
@@ -87,6 +88,7 @@ class AdHocCLI(CLI):
# only thing left should be host pattern
pattern = to_text(context.CLIARGS['args'], errors='surrogate_or_strict')
+ # handle password prompts
sshpass = None
becomepass = None
@@ -96,6 +98,7 @@ class AdHocCLI(CLI):
# get basic objects
loader, inventory, variable_manager = self._play_prereqs()
+ # get list of hosts to execute against
try:
hosts = self.get_host_list(inventory, context.CLIARGS['subset'], pattern)
except AnsibleError:
@@ -105,12 +108,14 @@ class AdHocCLI(CLI):
hosts = []
display.warning("No hosts matched, nothing to do")
+ # just listing hosts?
if context.CLIARGS['listhosts']:
display.display(' hosts (%d):' % len(hosts))
for host in hosts:
display.display(' %s' % host)
return 0
+ # verify we have arguments if we know we need em
if context.CLIARGS['module_name'] in C.MODULE_REQUIRE_ARGS and not context.CLIARGS['module_args']:
err = "No argument passed to %s module" % context.CLIARGS['module_name']
if pattern.endswith(".yml"):
@@ -122,6 +127,7 @@ class AdHocCLI(CLI):
raise AnsibleOptionsError("'%s' is not a valid action for ad-hoc commands"
% context.CLIARGS['module_name'])
+ # construct playbook objects to wrap task
play_ds = self._play_ds(pattern, context.CLIARGS['seconds'], context.CLIARGS['poll_interval'])
play = Play().load(play_ds, variable_manager=variable_manager, loader=loader)
@@ -142,7 +148,7 @@ class AdHocCLI(CLI):
run_tree = False
if context.CLIARGS['tree']:
- C.DEFAULT_CALLBACK_WHITELIST.append('tree')
+ C.CALLBACKS_ENABLED.append('tree')
C.TREE_DIR = context.CLIARGS['tree']
run_tree = True
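
For context on the reworded -a help text: the ad-hoc CLI hands that option string to parse_kv() when building the task in _play_ds(). A short sketch with made-up option values, assuming an action that accepts name/state options:

    from ansible.parsing.splitter import parse_kv

    # -a "name=httpd state=started" becomes a dict of action options
    parse_kv("name=httpd state=started")
    # -> {'name': 'httpd', 'state': 'started'}
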
diff --git a/lib/ansible/cli/arguments/option_helpers.py b/lib/ansible/cli/arguments/option_helpers.py
index e18cd6ce..7492e781 100644
--- a/lib/ansible/cli/arguments/option_helpers.py
+++ b/lib/ansible/cli/arguments/option_helpers.py
@@ -13,6 +13,14 @@ import sys
import time
import yaml
+try:
+ import _yaml
+ HAS_LIBYAML = True
+except ImportError:
+ HAS_LIBYAML = False
+
+from jinja2 import __version__ as j2_version
+
import ansible
from ansible import constants as C
from ansible.module_utils._text import to_native
@@ -98,6 +106,15 @@ def unfrack_path(pathsep=False):
return inner
+def maybe_unfrack_path(beacon):
+
+ def inner(value):
+ if value.startswith(beacon):
+ return beacon + unfrackpath(value[1:])
+ return value
+ return inner
+
+
def _git_repo_info(repo_path):
""" returns a string containing git branch, commit id and commit date """
result = None
@@ -141,46 +158,34 @@ def _git_repo_info(repo_path):
def _gitinfo():
- basedir = os.path.join(os.path.dirname(__file__), '..', '..', '..')
+ basedir = os.path.normpath(os.path.join(os.path.dirname(__file__), '..', '..', '..', '..'))
repo_path = os.path.join(basedir, '.git')
- result = _git_repo_info(repo_path)
- submodules = os.path.join(basedir, '.gitmodules')
-
- if not os.path.exists(submodules):
- return result
-
- with open(submodules) as f:
- for line in f:
- tokens = line.strip().split(' ')
- if tokens[0] == 'path':
- submodule_path = tokens[2]
- submodule_info = _git_repo_info(os.path.join(basedir, submodule_path, '.git'))
- if not submodule_info:
- submodule_info = ' not found - use git submodule update --init ' + submodule_path
- result += "\n {0}: {1}".format(submodule_path, submodule_info)
- return result
+ return _git_repo_info(repo_path)
def version(prog=None):
""" return ansible version """
if prog:
- result = " ".join((prog, __version__))
+ result = ["{0} [core {1}] ".format(prog, __version__)]
else:
- result = __version__
+ result = [__version__]
gitinfo = _gitinfo()
if gitinfo:
- result = result + " {0}".format(gitinfo)
- result += "\n config file = %s" % C.CONFIG_FILE
+ result[0] = "{0} {1}".format(result[0], gitinfo)
+ result.append(" config file = %s" % C.CONFIG_FILE)
if C.DEFAULT_MODULE_PATH is None:
cpath = "Default w/o overrides"
else:
cpath = C.DEFAULT_MODULE_PATH
- result = result + "\n configured module search path = %s" % cpath
- result = result + "\n ansible python module location = %s" % ':'.join(ansible.__path__)
- result = result + "\n executable location = %s" % sys.argv[0]
- result = result + "\n python version = %s" % ''.join(sys.version.splitlines())
- return result
+ result.append(" configured module search path = %s" % cpath)
+ result.append(" ansible python module location = %s" % ':'.join(ansible.__path__))
+ result.append(" ansible collection location = %s" % ':'.join(C.COLLECTIONS_PATHS))
+ result.append(" executable location = %s" % sys.argv[0])
+ result.append(" python version = %s" % ''.join(sys.version.splitlines()))
+ result.append(" jinja version = %s" % j2_version)
+ result.append(" libyaml = %s" % HAS_LIBYAML)
+ return "\n".join(result)
#
@@ -225,7 +230,8 @@ def add_basedir_options(parser):
"""Add options for commands which can set a playbook basedir"""
parser.add_argument('--playbook-dir', default=C.config.get_config_value('PLAYBOOK_DIR'), dest='basedir', action='store',
help="Since this tool does not use playbooks, use this as a substitute playbook directory."
- "This sets the relative path for many features including roles/ group_vars/ etc.")
+ "This sets the relative path for many features including roles/ group_vars/ etc.",
+ type=unfrack_path())
def add_check_options(parser):
@@ -253,13 +259,15 @@ def add_connect_options(parser):
help="connection type to use (default=%s)" % C.DEFAULT_TRANSPORT)
connect_group.add_argument('-T', '--timeout', default=C.DEFAULT_TIMEOUT, type=int, dest='timeout',
help="override the connection timeout in seconds (default=%s)" % C.DEFAULT_TIMEOUT)
- connect_group.add_argument('--ssh-common-args', default='', dest='ssh_common_args',
+
+ # ssh only
+ connect_group.add_argument('--ssh-common-args', default=None, dest='ssh_common_args',
help="specify common arguments to pass to sftp/scp/ssh (e.g. ProxyCommand)")
- connect_group.add_argument('--sftp-extra-args', default='', dest='sftp_extra_args',
+ connect_group.add_argument('--sftp-extra-args', default=None, dest='sftp_extra_args',
help="specify extra arguments to pass to sftp only (e.g. -f, -l)")
- connect_group.add_argument('--scp-extra-args', default='', dest='scp_extra_args',
+ connect_group.add_argument('--scp-extra-args', default=None, dest='scp_extra_args',
help="specify extra arguments to pass to scp only (e.g. -l)")
- connect_group.add_argument('--ssh-extra-args', default='', dest='ssh_extra_args',
+ connect_group.add_argument('--ssh-extra-args', default=None, dest='ssh_extra_args',
help="specify extra arguments to pass to ssh only (e.g. -R)")
parser.add_argument_group(connect_group)
@@ -346,10 +354,16 @@ def add_runas_prompt_options(parser, runas_group=None):
def add_runtask_options(parser):
"""Add options for commands that run a task"""
- parser.add_argument('-e', '--extra-vars', dest="extra_vars", action="append",
+ parser.add_argument('-e', '--extra-vars', dest="extra_vars", action="append", type=maybe_unfrack_path('@'),
help="set additional variables as key=value or YAML/JSON, if filename prepend with @", default=[])
+def add_tasknoplay_options(parser):
+ """Add options for commands that run a task w/o a defined play"""
+ parser.add_argument('--task-timeout', type=int, dest="task_timeout", action="store", default=C.TASK_TIMEOUT,
+ help="set task timeout limit in seconds, must be positive integer.")
+
+
def add_subset_options(parser):
"""Add options for commands which can run a subset of tasks"""
parser.add_argument('-t', '--tags', dest='tags', default=C.TAGS_RUN, action='append',
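
A brief usage sketch for the maybe_unfrack_path() helper added above, as wired into -e/--extra-vars: values prefixed with '@' are treated as file paths and expanded, anything else passes through unchanged (the paths are illustrative):

    from ansible.cli.arguments.option_helpers import maybe_unfrack_path

    convert = maybe_unfrack_path('@')
    convert('@group_vars/all.yml')   # -> '@' followed by the expanded absolute path
    convert('version=1.2.3')         # -> returned unchanged
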
diff --git a/lib/ansible/cli/config.py b/lib/ansible/cli/config.py
index a3f84456..286aad7e 100644
--- a/lib/ansible/cli/config.py
+++ b/lib/ansible/cli/config.py
@@ -159,7 +159,7 @@ class ConfigCLI(CLI):
'''
list all current configs reading lib/constants.py and shows env and config file setting names
'''
- self.pager(to_text(yaml.dump(self.config.get_configuration_definitions(), Dumper=AnsibleDumper), errors='surrogate_or_strict'))
+ self.pager(to_text(yaml.dump(self.config.get_configuration_definitions(ignore_private=True), Dumper=AnsibleDumper), errors='surrogate_or_strict'))
def execute_dump(self):
'''
@@ -167,7 +167,7 @@ class ConfigCLI(CLI):
'''
# FIXME: deal with plugins, not just base config
text = []
- defaults = self.config.get_configuration_definitions().copy()
+ defaults = self.config.get_configuration_definitions(ignore_private=True).copy()
for setting in self.config.data.get_settings():
if setting.name in defaults:
defaults[setting.name] = setting
diff --git a/lib/ansible/cli/console.py b/lib/ansible/cli/console.py
index 2383ebdd..e2874896 100644
--- a/lib/ansible/cli/console.py
+++ b/lib/ansible/cli/console.py
@@ -6,17 +6,6 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-########################################################
-# ansible-console is an interactive REPL shell for ansible
-# with built-in tab completion for all the documented modules
-#
-# Available commands:
-# cd - change host/group (you can use host patterns eg.: app*.dc*:!app01*)
-# list - list available hosts in the current path
-# forks - change fork
-# become - become
-# ! - forces shell module instead of the ansible module (!yum update -y)
-
import atexit
import cmd
import getpass
@@ -42,7 +31,30 @@ display = Display()
class ConsoleCLI(CLI, cmd.Cmd):
- ''' a REPL that allows for running ad-hoc tasks against a chosen inventory (based on dominis' ansible-shell).'''
+ '''
+ A REPL that allows for running ad-hoc tasks against a chosen inventory
+ from a nice shell with built-in tab completion (based on dominis'
+ ansible-shell).
+
+ It supports several commands, and you can modify its configuration at
+ runtime:
+
+ - `cd [pattern]`: change host/group (you can use host patterns eg.: app*.dc*:!app01*)
+ - `list`: list available hosts in the current path
+ - `list groups`: list groups included in the current path
+ - `become`: toggle the become flag
+ - `!`: forces shell module instead of the ansible module (!yum update -y)
+ - `verbosity [num]`: set the verbosity level
+ - `forks [num]`: set the number of forks
+ - `become_user [user]`: set the become_user
+ - `remote_user [user]`: set the remote_user
+ - `become_method [method]`: set the privilege escalation method
+ - `check [bool]`: toggle check mode
+ - `diff [bool]`: toggle diff mode
+ - `timeout [integer]`: set the timeout of tasks in seconds (0 to disable)
+ - `help [command/module]`: display documentation for the command or module
+ - `exit`: exit ansible-console
+ '''
modules = []
ARGUMENTS = {'host-pattern': 'A name of a group in the inventory, a shell-like glob '
@@ -55,7 +67,7 @@ class ConsoleCLI(CLI, cmd.Cmd):
super(ConsoleCLI, self).__init__(args)
- self.intro = 'Welcome to the ansible console.\nType help or ? to list commands.\n'
+ self.intro = 'Welcome to the ansible console. Type help or ? to list commands.\n'
self.groups = []
self.hosts = []
@@ -75,13 +87,14 @@ class ConsoleCLI(CLI, cmd.Cmd):
self.check_mode = None
self.diff = None
self.forks = None
+ self.task_timeout = None
cmd.Cmd.__init__(self)
def init_parser(self):
super(ConsoleCLI, self).init_parser(
desc="REPL console for executing Ansible tasks.",
- epilog="This is not a live session/connection, each task executes in the background and returns it's results."
+ epilog="This is not a live session/connection: each task is executed in the background and returns its results."
)
opt_help.add_runas_options(self.parser)
opt_help.add_inventory_options(self.parser)
@@ -91,6 +104,8 @@ class ConsoleCLI(CLI, cmd.Cmd):
opt_help.add_fork_options(self.parser)
opt_help.add_module_options(self.parser)
opt_help.add_basedir_options(self.parser)
+ opt_help.add_runtask_options(self.parser)
+ opt_help.add_tasknoplay_options(self.parser)
# options unique to shell
self.parser.add_argument('pattern', help='host pattern', metavar='pattern', default='all', nargs='?')
@@ -109,7 +124,12 @@ class ConsoleCLI(CLI, cmd.Cmd):
def cmdloop(self):
try:
cmd.Cmd.cmdloop(self)
+
except KeyboardInterrupt:
+ self.cmdloop()
+
+ except EOFError:
+ self.display("[Ansible-console was exited]")
self.do_exit(self)
def set_prompt(self):
@@ -147,7 +167,7 @@ class ConsoleCLI(CLI, cmd.Cmd):
self._find_modules_in_path(module)
elif module.startswith('__'):
continue
- elif any(module.endswith(x) for x in C.BLACKLIST_EXTS):
+ elif any(module.endswith(x) for x in C.REJECT_EXTS):
continue
elif module in C.IGNORE_FILES:
continue
@@ -183,11 +203,12 @@ class ConsoleCLI(CLI, cmd.Cmd):
result = None
try:
check_raw = module in C._ACTION_ALLOWS_RAW_ARGS
+ task = dict(action=dict(module=module, args=parse_kv(module_args, check_raw=check_raw)), timeout=self.task_timeout)
play_ds = dict(
name="Ansible Shell",
hosts=self.cwd,
gather_facts='no',
- tasks=[dict(action=dict(module=module, args=parse_kv(module_args, check_raw=check_raw)))],
+ tasks=[task],
remote_user=self.remote_user,
become=self.become,
become_user=self.become_user,
@@ -272,8 +293,11 @@ class ConsoleCLI(CLI, cmd.Cmd):
if not arg:
display.display('Usage: verbosity <number>')
else:
- display.verbosity = int(arg)
- display.v('verbosity level set to %s' % arg)
+ try:
+ display.verbosity = int(arg)
+ display.v('verbosity level set to %s' % arg)
+ except (TypeError, ValueError) as e:
+ display.error('The verbosity must be a valid integer: %s' % to_text(e))
def do_cd(self, arg):
"""
@@ -337,26 +361,43 @@ class ConsoleCLI(CLI, cmd.Cmd):
display.v("become_method changed to %s" % self.become_method)
else:
display.display("Please specify a become_method, e.g. `become_method su`")
+ display.v("Current become_method is %s" % self.become_method)
def do_check(self, arg):
"""Toggle whether plays run with check mode"""
if arg:
self.check_mode = boolean(arg, strict=False)
- display.v("check mode changed to %s" % self.check_mode)
+ display.display("check mode changed to %s" % self.check_mode)
else:
display.display("Please specify check mode value, e.g. `check yes`")
+ display.v("check mode is currently %s." % self.check_mode)
def do_diff(self, arg):
"""Toggle whether plays run with diff"""
if arg:
self.diff = boolean(arg, strict=False)
- display.v("diff mode changed to %s" % self.diff)
+ display.display("diff mode changed to %s" % self.diff)
else:
display.display("Please specify a diff value , e.g. `diff yes`")
+ display.v("diff mode is currently %s" % self.diff)
+
+ def do_timeout(self, arg):
+ """Set the timeout"""
+ if arg:
+ try:
+ timeout = int(arg)
+ if timeout < 0:
+ display.error('The timeout must be greater than or equal to 1, use 0 to disable')
+ else:
+ self.task_timeout = timeout
+ except (TypeError, ValueError) as e:
+ display.error('The timeout must be a valid positive integer, or 0 to disable: %s' % to_text(e))
+ else:
+ display.display('Usage: timeout <seconds>')
def do_exit(self, args):
"""Exits from the console"""
- sys.stdout.write('\n')
+ sys.stdout.write('\nAnsible-console was exited.\n')
return -1
do_EOF = do_exit
@@ -419,6 +460,7 @@ class ConsoleCLI(CLI, cmd.Cmd):
self.check_mode = context.CLIARGS['check']
self.diff = context.CLIARGS['diff']
self.forks = context.CLIARGS['forks']
+ self.task_timeout = context.CLIARGS['task_timeout']
# dynamically add modules as commands
self.modules = self.list_modules()
diff --git a/lib/ansible/cli/doc.py b/lib/ansible/cli/doc.py
index 41baa796..047f6d65 100644
--- a/lib/ansible/cli/doc.py
+++ b/lib/ansible/cli/doc.py
@@ -7,7 +7,9 @@ __metaclass__ = type
import datetime
import json
+import pkgutil
import os
+import os.path
import re
import textwrap
import traceback
@@ -20,19 +22,21 @@ from ansible import context
from ansible.cli import CLI
from ansible.cli.arguments import option_helpers as opt_help
from ansible.collections.list import list_collection_dirs
-from ansible.errors import AnsibleError, AnsibleOptionsError
+from ansible.errors import AnsibleError, AnsibleOptionsError, AnsibleParserError
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.common._collections_compat import Container, Sequence
from ansible.module_utils.common.json import AnsibleJSONEncoder
-from ansible.module_utils.six import string_types
+from ansible.module_utils.compat import importlib
+from ansible.module_utils.six import iteritems, string_types
from ansible.parsing.plugin_docs import read_docstub
+from ansible.parsing.utils.yaml import from_yaml
from ansible.parsing.yaml.dumper import AnsibleDumper
from ansible.plugins.loader import action_loader, fragment_loader
-from ansible.utils.collection_loader import AnsibleCollectionConfig
+from ansible.utils.collection_loader import AnsibleCollectionConfig, AnsibleCollectionRef
from ansible.utils.collection_loader._collection_finder import _get_collection_name_from_path
from ansible.utils.display import Display
from ansible.utils.plugin_docs import (
- BLACKLIST,
+ REJECTLIST,
remove_current_collection_from_versions_and_dates,
get_docstring,
get_versioned_doclink,
@@ -41,6 +45,11 @@ from ansible.utils.plugin_docs import (
display = Display()
+TARGET_OPTIONS = C.DOCUMENTABLE_PLUGINS + ('role', 'keyword',)
+PB_OBJECTS = ['Play', 'Role', 'Block', 'Task']
+PB_LOADED = {}
+
+
def jdump(text):
try:
display.display(json.dumps(text, cls=AnsibleJSONEncoder, sort_keys=True, indent=4))
@@ -63,7 +72,261 @@ class PluginNotFound(Exception):
pass
-class DocCLI(CLI):
+class RoleMixin(object):
+ """A mixin containing all methods relevant to role argument specification functionality.
+
+ Note: The methods for actual display of role data are not present here.
+ """
+
+ # Potential locations of the role arg spec file in the meta subdir, with main.yml
+ # having the lowest priority.
+ ROLE_ARGSPEC_FILES = ['argument_specs' + e for e in C.YAML_FILENAME_EXTENSIONS] + ["main" + e for e in C.YAML_FILENAME_EXTENSIONS]
+
+ def _load_argspec(self, role_name, collection_path=None, role_path=None):
+ """Load the role argument spec data from the source file.
+
+ :param str role_name: The name of the role for which we want the argspec data.
+ :param str collection_path: Path to the collection containing the role. This
+ will be None for standard roles.
+ :param str role_path: Path to the standard role. This will be None for
+ collection roles.
+
+ We support two files containing the role arg spec data: either meta/main.yml
+ or meta/argument_spec.yml. The argument_spec.yml file will take precedence
+ over the meta/main.yml file, if it exists. Data is NOT combined between the
+ two files.
+
+ :returns: A dict of all data underneath the ``argument_specs`` top-level YAML
+ key in the argspec data file. Empty dict is returned if there is no data.
+ """
+
+ if collection_path:
+ meta_path = os.path.join(collection_path, 'roles', role_name, 'meta')
+ elif role_path:
+ meta_path = os.path.join(role_path, 'meta')
+ else:
+ raise AnsibleError("A path is required to load argument specs for role '%s'" % role_name)
+
+ path = None
+
+ # Check all potential spec files
+ for specfile in self.ROLE_ARGSPEC_FILES:
+ full_path = os.path.join(meta_path, specfile)
+ if os.path.exists(full_path):
+ path = full_path
+ break
+
+ if path is None:
+ return {}
+
+ try:
+ with open(path, 'r') as f:
+ data = from_yaml(f.read(), file_name=path)
+ if data is None:
+ data = {}
+ return data.get('argument_specs', {})
+ except (IOError, OSError) as e:
+ raise AnsibleParserError("An error occurred while trying to read the file '%s': %s" % (path, to_native(e)), orig_exc=e)
+
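
To make the expected file shape concrete, a hypothetical example (with invented option names) of what _load_argspec() returns, i.e. everything under the top-level argument_specs key of the spec file:

    # Parsed result for a role whose meta/argument_specs.yml contains:
    #
    #   argument_specs:
    #     main:
    #       short_description: Install the example service
    #       options:
    #         port: {type: int, default: 8080}
    #
    argument_specs = {
        'main': {
            'short_description': 'Install the example service',
            'options': {
                'port': {'type': 'int', 'default': 8080},
            },
        },
    }
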
+ def _find_all_normal_roles(self, role_paths, name_filters=None):
+ """Find all non-collection roles that have an argument spec file.
+
+ Note that argument specs do not actually need to exist within the spec file.
+
+ :param role_paths: A tuple of one or more role paths. When a role with the same name
+ is found in multiple paths, only the first-found role is returned.
+ :param name_filters: A tuple of one or more role names used to filter the results.
+
+ :returns: A set of tuples consisting of: role name, full role path
+ """
+ found = set()
+ found_names = set()
+
+ for path in role_paths:
+ if not os.path.isdir(path):
+ continue
+
+ # Check each subdir for an argument spec file
+ for entry in os.listdir(path):
+ role_path = os.path.join(path, entry)
+
+ # Check all potential spec files
+ for specfile in self.ROLE_ARGSPEC_FILES:
+ full_path = os.path.join(role_path, 'meta', specfile)
+ if os.path.exists(full_path):
+ if name_filters is None or entry in name_filters:
+ if entry not in found_names:
+ found.add((entry, role_path))
+ found_names.add(entry)
+ # select first-found
+ break
+ return found
+
+ def _find_all_collection_roles(self, name_filters=None, collection_filter=None):
+ """Find all collection roles with an argument spec file.
+
+ Note that argument specs do not actually need to exist within the spec file.
+
+ :param name_filters: A tuple of one or more role names used to filter the results. These
+ might be fully qualified with the collection name (e.g., community.general.roleA)
+ or not (e.g., roleA).
+
+ :param collection_filter: A string containing the FQCN of a collection which will be
+ used to limit results. This filter will take precedence over the name_filters.
+
+ :returns: A set of tuples consisting of: role name, collection name, collection path
+ """
+ found = set()
+ b_colldirs = list_collection_dirs(coll_filter=collection_filter)
+ for b_path in b_colldirs:
+ path = to_text(b_path, errors='surrogate_or_strict')
+ collname = _get_collection_name_from_path(b_path)
+
+ roles_dir = os.path.join(path, 'roles')
+ if os.path.exists(roles_dir):
+ for entry in os.listdir(roles_dir):
+
+ # Check all potential spec files
+ for specfile in self.ROLE_ARGSPEC_FILES:
+ full_path = os.path.join(roles_dir, entry, 'meta', specfile)
+ if os.path.exists(full_path):
+ if name_filters is None:
+ found.add((entry, collname, path))
+ else:
+ # Name filters might contain a collection FQCN or not.
+ for fqcn in name_filters:
+ if len(fqcn.split('.')) == 3:
+ (ns, col, role) = fqcn.split('.')
+ if '.'.join([ns, col]) == collname and entry == role:
+ found.add((entry, collname, path))
+ elif fqcn == entry:
+ found.add((entry, collname, path))
+ break
+ return found
+
+ def _build_summary(self, role, collection, argspec):
+ """Build a summary dict for a role.
+
+ Returns a simplified role arg spec containing only the role entry points and their
+ short descriptions, and the role collection name (if applicable).
+
+ :param role: The simple role name.
+ :param collection: The collection containing the role (None or empty string if N/A).
+ :param argspec: The complete role argspec data dict.
+
+ :returns: A tuple with the FQCN role name and a summary dict.
+ """
+ if collection:
+ fqcn = '.'.join([collection, role])
+ else:
+ fqcn = role
+ summary = {}
+ summary['collection'] = collection
+ summary['entry_points'] = {}
+ for ep in argspec.keys():
+ entry_spec = argspec[ep] or {}
+ summary['entry_points'][ep] = entry_spec.get('short_description', '')
+ return (fqcn, summary)
+
+ def _build_doc(self, role, path, collection, argspec, entry_point):
+ if collection:
+ fqcn = '.'.join([collection, role])
+ else:
+ fqcn = role
+ doc = {}
+ doc['path'] = path
+ doc['collection'] = collection
+ doc['entry_points'] = {}
+ for ep in argspec.keys():
+ if entry_point is None or ep == entry_point:
+ entry_spec = argspec[ep] or {}
+ doc['entry_points'][ep] = entry_spec
+
+ # If we didn't add any entry points (b/c of filtering), ignore this entry.
+ if len(doc['entry_points'].keys()) == 0:
+ doc = None
+
+ return (fqcn, doc)
+
+ def _create_role_list(self, roles_path, collection_filter=None):
+ """Return a dict describing the listing of all roles with arg specs.
+
+ :param role_paths: A tuple of one or more role paths.
+
+ :returns: A dict indexed by role name, with 'collection' and 'entry_points' keys per role.
+
+ Example return:
+
+ results = {
+ 'roleA': {
+ 'collection': '',
+ 'entry_points': {
+ 'main': 'Short description for main'
+ }
+ },
+ 'a.b.c.roleB': {
+ 'collection': 'a.b.c',
+ 'entry_points': {
+ 'main': 'Short description for main',
+ 'alternate': 'Short description for alternate entry point'
+ }
+ 'x.y.z.roleB': {
+ 'collection': 'x.y.z',
+ 'entry_points': {
+ 'main': 'Short description for main',
+ }
+ },
+ }
+ """
+ if not collection_filter:
+ roles = self._find_all_normal_roles(roles_path)
+ else:
+ roles = []
+ collroles = self._find_all_collection_roles(collection_filter=collection_filter)
+
+ result = {}
+
+ for role, role_path in roles:
+ argspec = self._load_argspec(role, role_path=role_path)
+ fqcn, summary = self._build_summary(role, '', argspec)
+ result[fqcn] = summary
+
+ for role, collection, collection_path in collroles:
+ argspec = self._load_argspec(role, collection_path=collection_path)
+ fqcn, summary = self._build_summary(role, collection, argspec)
+ result[fqcn] = summary
+
+ return result
+
+ def _create_role_doc(self, role_names, roles_path, entry_point=None):
+ """
+ :param role_names: A tuple of one or more role names.
+ :param role_paths: A tuple of one or more role paths.
+ :param entry_point: A role entry point name for filtering.
+
+ :returns: A dict indexed by role name, with 'collection', 'entry_points', and 'path' keys per role.
+ """
+ roles = self._find_all_normal_roles(roles_path, name_filters=role_names)
+ collroles = self._find_all_collection_roles(name_filters=role_names)
+
+ result = {}
+
+ for role, role_path in roles:
+ argspec = self._load_argspec(role, role_path=role_path)
+ fqcn, doc = self._build_doc(role, role_path, '', argspec, entry_point)
+ if doc:
+ result[fqcn] = doc
+
+ for role, collection, collection_path in collroles:
+ argspec = self._load_argspec(role, collection_path=collection_path)
+ fqcn, doc = self._build_doc(role, collection_path, collection, argspec, entry_point)
+ if doc:
+ result[fqcn] = doc
+
+ return result
+
+
+class DocCLI(CLI, RoleMixin):
''' displays information on modules installed in Ansible libraries.
It displays a terse listing of plugins and their short descriptions,
provides a printout of their DOCUMENTATION strings,
@@ -71,6 +334,7 @@ class DocCLI(CLI):
# default ignore list for detailed views
IGNORE = ('module', 'docuri', 'version_added', 'short_description', 'now_date', 'plainexamples', 'returndocs', 'collection')
+ JSON_IGNORE = ('attributes',)
# Warning: If you add more elements here, you also need to add it to the docsite build (in the
# ansible-community/antsibull repo)
@@ -83,6 +347,12 @@ class DocCLI(CLI):
_CONST = re.compile(r"\bC\(([^)]+)\)")
_RULER = re.compile(r"\bHORIZONTALLINE\b")
+ # rst specific
+ _REFTAG = re.compile(r":ref:")
+ _TERM = re.compile(r":term:")
+ _NOTES = re.compile(r".. note:")
+ _SEEALSO = re.compile(r"^\s*.. seealso:.*$", re.MULTILINE)
+
def __init__(self, args):
super(DocCLI, self).__init__(args)
@@ -100,6 +370,11 @@ class DocCLI(CLI):
t = cls._CONST.sub("`" + r"\1" + "'", t) # C(word) => `word'
t = cls._RULER.sub("\n{0}\n".format("-" * 13), t) # HORIZONTALLINE => -------
+ t = cls._REFTAG.sub(r"", t) # remove rst :ref:
+ t = cls._TERM.sub(r"", t) # remove rst :term:
+ t = cls._NOTES.sub(r" Note:", t) # nicer note
+ t = cls._SEEALSO.sub(r"", t) # remove seealso
+
return t
def init_parser(self):
@@ -114,13 +389,20 @@ class DocCLI(CLI):
opt_help.add_basedir_options(self.parser)
self.parser.add_argument('args', nargs='*', help='Plugin', metavar='plugin')
+
self.parser.add_argument("-t", "--type", action="store", default='module', dest='type',
help='Choose which plugin type (defaults to "module"). '
- 'Available plugin types are : {0}'.format(C.DOCUMENTABLE_PLUGINS),
- choices=C.DOCUMENTABLE_PLUGINS)
+ 'Available plugin types are : {0}'.format(TARGET_OPTIONS),
+ choices=TARGET_OPTIONS)
self.parser.add_argument("-j", "--json", action="store_true", default=False, dest='json_format',
help='Change output into json format.')
+ # role-specific options
+ self.parser.add_argument("-r", "--roles-path", dest='roles_path', default=C.DEFAULT_ROLES_PATH,
+ type=opt_help.unfrack_path(pathsep=True),
+ action=opt_help.PrependListAction,
+ help='The path to the directory containing your roles.')
+
exclusive = self.parser.add_mutually_exclusive_group()
exclusive.add_argument("-F", "--list_files", action="store_true", default=False, dest="list_files",
help='Show plugin names and their source files without summaries (implies --list). %s' % coll_filter)
@@ -130,6 +412,8 @@ class DocCLI(CLI):
help='Show playbook snippet for specified plugin(s)')
exclusive.add_argument("--metadata-dump", action="store_true", default=False, dest='dump',
help='**For internal testing only** Dump json metadata for all plugins.')
+ exclusive.add_argument("-e", "--entry-point", dest="entry_point",
+ help="Select the entry point for role(s).")
def post_process_args(self, options):
options = super(DocCLI, self).post_process_args(options)
@@ -172,112 +456,269 @@ class DocCLI(CLI):
# display results
DocCLI.pager("\n".join(text))
- def run(self):
+ def _display_available_roles(self, list_json):
+ """Display all roles we can find with a valid argument specification.
- super(DocCLI, self).run()
+ Output is: fqcn role name, entry point, short description
+ """
+ roles = list(list_json.keys())
+ entry_point_names = set()
+ for role in roles:
+ for entry_point in list_json[role]['entry_points'].keys():
+ entry_point_names.add(entry_point)
- plugin_type = context.CLIARGS['type']
- do_json = context.CLIARGS['json_format']
+ max_role_len = 0
+ max_ep_len = 0
- if plugin_type in C.DOCUMENTABLE_PLUGINS:
- loader = getattr(plugin_loader, '%s_loader' % plugin_type)
- else:
- raise AnsibleOptionsError("Unknown or undocumentable plugin type: %s" % plugin_type)
+ if roles:
+ max_role_len = max(len(x) for x in roles)
+ if entry_point_names:
+ max_ep_len = max(len(x) for x in entry_point_names)
- # add to plugin paths from command line
- basedir = context.CLIARGS['basedir']
- if basedir:
- AnsibleCollectionConfig.playbook_paths = basedir
- loader.add_directory(basedir, with_subdir=True)
+ linelimit = display.columns - max_role_len - max_ep_len - 5
+ text = []
+
+ for role in sorted(roles):
+ for entry_point, desc in iteritems(list_json[role]['entry_points']):
+ if len(desc) > linelimit:
+ desc = desc[:linelimit] + '...'
+ text.append("%-*s %-*s %s" % (max_role_len, role,
+ max_ep_len, entry_point,
+ desc))
- if context.CLIARGS['module_path']:
- for path in context.CLIARGS['module_path']:
- if path:
- loader.add_directory(path)
+ # display results
+ DocCLI.pager("\n".join(text))
- # save only top level paths for errors
- search_paths = DocCLI.print_paths(loader)
- loader._paths = None # reset so we can use subdirs below
+ def _display_role_doc(self, role_json):
+ roles = list(role_json.keys())
+ text = []
+ for role in roles:
+ text += self.get_role_man_text(role, role_json[role])
- # list plugins names or filepath for type, both options share most code
- if context.CLIARGS['list_files'] or context.CLIARGS['list_dir']:
+ # display results
+ DocCLI.pager("\n".join(text))
- coll_filter = None
- if len(context.CLIARGS['args']) == 1:
- coll_filter = context.CLIARGS['args'][0]
+ @staticmethod
+ def _list_keywords():
+ return from_yaml(pkgutil.get_data('ansible', 'keyword_desc.yml'))
- if coll_filter in ('', None):
- paths = loader._get_paths_with_context()
- for path_context in paths:
- self.plugin_list.update(
- DocCLI.find_plugins(path_context.path, path_context.internal, plugin_type))
+ @staticmethod
+ def _get_keywords_docs(keys):
- add_collection_plugins(self.plugin_list, plugin_type, coll_filter=coll_filter)
+ data = {}
+ descs = DocCLI._list_keywords()
+ for keyword in keys:
+ if keyword.startswith('with_'):
+ keyword = 'loop'
+ try:
+ # if no desc, typeerror raised ends this block
+ kdata = {'description': descs[keyword]}
- # get appropriate content depending on option
- if context.CLIARGS['list_dir']:
- results = self._get_plugin_list_descriptions(loader)
- elif context.CLIARGS['list_files']:
- results = self._get_plugin_list_filenames(loader)
-
- if do_json:
- jdump(results)
- elif self.plugin_list:
- self.display_plugin_list(results)
- else:
- display.warning("No plugins found.")
+ # get playbook objects for keyword and use first to get keyword attributes
+ kdata['applies_to'] = []
+ for pobj in PB_OBJECTS:
+ if pobj not in PB_LOADED:
+ obj_class = 'ansible.playbook.%s' % pobj.lower()
+ loaded_class = importlib.import_module(obj_class)
+ PB_LOADED[pobj] = getattr(loaded_class, pobj, None)
+
+ if keyword in PB_LOADED[pobj]._valid_attrs:
+ kdata['applies_to'].append(pobj)
+
+ # we should only need these once
+ if 'type' not in kdata:
+
+ fa = getattr(PB_LOADED[pobj], '_%s' % keyword)
+ if getattr(fa, 'private'):
+ kdata = {}
+ raise KeyError
+
+ kdata['type'] = getattr(fa, 'isa', 'string')
+
+ if keyword.endswith('when'):
+ kdata['template'] = 'implicit'
+ elif getattr(fa, 'static'):
+ kdata['template'] = 'static'
+ else:
+ kdata['template'] = 'explicit'
+
+ # those that require no processing
+ for visible in ('alias', 'priority'):
+ kdata[visible] = getattr(fa, visible)
+
+ # remove None keys
+ for k in list(kdata.keys()):
+ if kdata[k] is None:
+ del kdata[k]
+
+ data[keyword] = kdata
+
+ except KeyError as e:
+ display.warning("Skipping Invalid keyword '%s' specified: %s" % (keyword, to_native(e)))
+
+ return data
+
+ def _list_plugins(self, plugin_type, loader):
+
+ results = {}
+ coll_filter = None
+ if len(context.CLIARGS['args']) == 1:
+ coll_filter = context.CLIARGS['args'][0]
+
+ if coll_filter in ('', None):
+ paths = loader._get_paths_with_context()
+ for path_context in paths:
+ self.plugin_list.update(DocCLI.find_plugins(path_context.path, path_context.internal, plugin_type))
+
+ add_collection_plugins(self.plugin_list, plugin_type, coll_filter=coll_filter)
+
+ # get appropriate content depending on option
+ if context.CLIARGS['list_dir']:
+ results = self._get_plugin_list_descriptions(loader)
+ elif context.CLIARGS['list_files']:
+ results = self._get_plugin_list_filenames(loader)
# dump plugin desc/data as JSON
elif context.CLIARGS['dump']:
- plugin_data = {}
plugin_names = DocCLI.get_all_plugins_of_type(plugin_type)
for plugin_name in plugin_names:
plugin_info = DocCLI.get_plugin_metadata(plugin_type, plugin_name)
if plugin_info is not None:
- plugin_data[plugin_name] = plugin_info
+ results[plugin_name] = plugin_info
+
+ return results
+
+ def _get_plugins_docs(self, plugin_type, loader):
+
+ search_paths = DocCLI.print_paths(loader)
+
+ # display specific plugin docs
+ if len(context.CLIARGS['args']) == 0:
+ raise AnsibleOptionsError("Incorrect options passed")
+
+ # get the docs for plugins in the command line list
+ plugin_docs = {}
+ for plugin in context.CLIARGS['args']:
+ try:
+ doc, plainexamples, returndocs, metadata = DocCLI._get_plugin_doc(plugin, plugin_type, loader, search_paths)
+ except PluginNotFound:
+ display.warning("%s %s not found in:\n%s\n" % (plugin_type, plugin, search_paths))
+ continue
+ except Exception as e:
+ display.vvv(traceback.format_exc())
+ raise AnsibleError("%s %s missing documentation (or could not parse"
+ " documentation): %s\n" %
+ (plugin_type, plugin, to_native(e)))
+
+ if not doc:
+ # The doc section existed but was empty
+ continue
+
+ plugin_docs[plugin] = DocCLI._combine_plugin_doc(plugin, plugin_type, doc, plainexamples, returndocs, metadata)
+
+ return plugin_docs
+
+ def run(self):
+
+ super(DocCLI, self).run()
+
+ basedir = context.CLIARGS['basedir']
+ plugin_type = context.CLIARGS['type']
+ do_json = context.CLIARGS['json_format']
+ roles_path = context.CLIARGS['roles_path']
+ listing = context.CLIARGS['list_files'] or context.CLIARGS['list_dir'] or context.CLIARGS['dump']
+ docs = {}
+
+ if basedir:
+ AnsibleCollectionConfig.playbook_paths = basedir
- jdump(plugin_data)
+ # Add any 'roles' subdir in playbook dir to the roles search path.
+ # And as a last resort, add the playbook dir itself. Order being:
+ # - 'roles' subdir of playbook dir
+ # - DEFAULT_ROLES_PATH
+ # - playbook dir
+ # NOTE: This matches logic in RoleDefinition._load_role_path() method.
+ subdir = os.path.join(basedir, "roles")
+ if os.path.isdir(subdir):
+ roles_path = (subdir,) + roles_path
+ roles_path = roles_path + (basedir,)
+
+ if plugin_type not in TARGET_OPTIONS:
+ raise AnsibleOptionsError("Unknown or undocumentable plugin type: %s" % plugin_type)
+ elif plugin_type == 'keyword':
+
+ if listing:
+ docs = DocCLI._list_keywords()
+ else:
+ docs = DocCLI._get_keywords_docs(context.CLIARGS['args'])
+ elif plugin_type == 'role':
+ if context.CLIARGS['list_dir']:
+ # If an argument was given with --list, it is a collection filter
+ coll_filter = None
+ if len(context.CLIARGS['args']) == 1:
+ coll_filter = context.CLIARGS['args'][0]
+ if not AnsibleCollectionRef.is_valid_collection_name(coll_filter):
+ raise AnsibleError('Invalid collection name (must be of the form namespace.collection): {0}'.format(coll_filter))
+ elif len(context.CLIARGS['args']) > 1:
+ raise AnsibleOptionsError("Only a single collection filter is supported.")
+
+ docs = self._create_role_list(roles_path, collection_filter=coll_filter)
+ else:
+ docs = self._create_role_doc(context.CLIARGS['args'], roles_path, context.CLIARGS['entry_point'])
else:
- # display specific plugin docs
- if len(context.CLIARGS['args']) == 0:
- raise AnsibleOptionsError("Incorrect options passed")
-
- # get the docs for plugins in the command line list
- plugin_docs = {}
- for plugin in context.CLIARGS['args']:
- try:
- doc, plainexamples, returndocs, metadata = DocCLI._get_plugin_doc(plugin, plugin_type, loader, search_paths)
- except PluginNotFound:
- display.warning("%s %s not found in:\n%s\n" % (plugin_type, plugin, search_paths))
- continue
- except Exception as e:
- display.vvv(traceback.format_exc())
- raise AnsibleError("%s %s missing documentation (or could not parse"
- " documentation): %s\n" %
- (plugin_type, plugin, to_native(e)))
-
- if not doc:
- # The doc section existed but was empty
- continue
+ loader = getattr(plugin_loader, '%s_loader' % plugin_type)
+
+ # add to plugin paths from command line
+ basedir = context.CLIARGS['basedir']
+ if basedir:
+ AnsibleCollectionConfig.playbook_paths = basedir
+ loader.add_directory(basedir, with_subdir=True)
- plugin_docs[plugin] = DocCLI._combine_plugin_doc(plugin, plugin_type, doc, plainexamples, returndocs, metadata)
+ if context.CLIARGS['module_path']:
+ for path in context.CLIARGS['module_path']:
+ if path:
+ loader.add_directory(path)
- if do_json:
- jdump(plugin_docs)
+ # save only top level paths for errors
+ loader._paths = None # reset so we can use subdirs below
+ if listing:
+ docs = self._list_plugins(plugin_type, loader)
else:
- # Some changes to how plain text docs are formatted
- text = []
- for plugin, doc_data in plugin_docs.items():
- textret = DocCLI.format_plugin_doc(plugin, plugin_type,
- doc_data['doc'], doc_data['examples'],
- doc_data['return'], doc_data['metadata'])
- if textret:
- text.append(textret)
- else:
- display.warning("No valid documentation was retrieved from '%s'" % plugin)
-
- if text:
- DocCLI.pager(''.join(text))
+ docs = self._get_plugins_docs(plugin_type, loader)
+
+ if do_json:
+ for entry in docs.keys():
+ for forbid in DocCLI.JSON_IGNORE:
+ try:
+ del docs[entry]['doc'][forbid]
+ except (KeyError, TypeError):
+ pass
+ jdump(docs)
+ else:
+ text = []
+ if plugin_type in C.DOCUMENTABLE_PLUGINS:
+ if listing and docs:
+ self.display_plugin_list(docs)
+ else:
+ # Some changes to how plain text docs are formatted
+ for plugin, doc_data in docs.items():
+ textret = DocCLI.format_plugin_doc(plugin, plugin_type,
+ doc_data['doc'], doc_data['examples'],
+ doc_data['return'], doc_data['metadata'])
+ if textret:
+ text.append(textret)
+ else:
+ display.warning("No valid documentation was retrieved from '%s'" % plugin)
+ elif plugin_type == 'role':
+ if context.CLIARGS['list_dir'] and docs:
+ self._display_available_roles(docs)
+ elif docs:
+ self._display_role_doc(docs)
+ elif docs:
+ text = DocCLI._dump_yaml(docs, '')
+
+ if text:
+ DocCLI.pager(''.join(text))
return 0
@@ -417,7 +858,7 @@ class DocCLI(CLI):
continue
elif os.path.isdir(full_path):
continue
- elif any(plugin.endswith(x) for x in C.BLACKLIST_EXTS):
+ elif any(plugin.endswith(x) for x in C.REJECT_EXTS):
continue
elif plugin.startswith('__'):
continue
@@ -430,7 +871,7 @@ class DocCLI(CLI):
plugin = os.path.splitext(plugin)[0] # removes the extension
plugin = plugin.lstrip('_') # remove underscore from deprecated plugins
- if plugin not in BLACKLIST.get(bkey, ()):
+ if plugin not in REJECTLIST.get(bkey, ()):
if collection:
plugin = '%s.%s' % (collection, plugin)
@@ -599,6 +1040,10 @@ class DocCLI(CLI):
if conf:
text.append(DocCLI._dump_yaml({'set_via': conf}, opt_indent))
+ # Remove empty version_added_collection
+ if opt.get('version_added_collection') == '':
+ opt.pop('version_added_collection')
+
for k in sorted(opt):
if k.startswith('_'):
continue
@@ -619,6 +1064,62 @@ class DocCLI(CLI):
if not suboptions:
text.append('')
+ def get_role_man_text(self, role, role_json):
+ '''Generate text for the supplied role suitable for display.
+
+ This is similar to get_man_text(), but roles are different enough that we have
+ a separate method for formatting their display.
+
+ :param role: The role name.
+ :param role_json: The JSON for the given role as returned from _create_role_doc().
+
+ :returns: An array of text suitable for displaying to screen.
+ '''
+ text = []
+ opt_indent = " "
+ pad = display.columns * 0.20
+ limit = max(display.columns - int(pad), 70)
+
+ text.append("> %s (%s)\n" % (role.upper(), role_json.get('path')))
+
+ for entry_point in role_json['entry_points']:
+ doc = role_json['entry_points'][entry_point]
+
+ if doc.get('short_description'):
+ text.append("ENTRY POINT: %s - %s\n" % (entry_point, doc.get('short_description')))
+ else:
+ text.append("ENTRY POINT: %s\n" % entry_point)
+
+ if doc.get('description'):
+ if isinstance(doc['description'], list):
+ desc = " ".join(doc['description'])
+ else:
+ desc = doc['description']
+
+ text.append("%s\n" % textwrap.fill(DocCLI.tty_ify(desc),
+ limit, initial_indent=opt_indent,
+ subsequent_indent=opt_indent))
+ if doc.get('options'):
+ text.append("OPTIONS (= is mandatory):\n")
+ DocCLI.add_fields(text, doc.pop('options'), limit, opt_indent)
+ text.append('')
+
+ # generic elements we will handle identically
+ for k in ('author',):
+ if k not in doc:
+ continue
+ if isinstance(doc[k], string_types):
+ text.append('%s: %s' % (k.upper(), textwrap.fill(DocCLI.tty_ify(doc[k]),
+ limit - (len(k) + 2), subsequent_indent=opt_indent)))
+ elif isinstance(doc[k], (list, tuple)):
+ text.append('%s: %s' % (k.upper(), ', '.join(doc[k])))
+ else:
+ # use empty indent since this affects the start of the yaml doc, not its keys
+ text.append(DocCLI._dump_yaml({k.upper(): doc[k]}, ''))
+ text.append('')
+
+ return text
+
@staticmethod
def get_man_text(doc, collection_name='', plugin_type=''):
# Create a copy so we don't modify the original
diff --git a/lib/ansible/cli/galaxy.py b/lib/ansible/cli/galaxy.py
index 35d8fa80..22931497 100644
--- a/lib/ansible/cli/galaxy.py
+++ b/lib/ansible/cli/galaxy.py
@@ -1,13 +1,15 @@
# Copyright: (c) 2013, James Cammarata <jcammarata@ansible.com>
-# Copyright: (c) 2018, Ansible Project
+# Copyright: (c) 2018-2021, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
+import json
import os.path
import re
import shutil
+import sys
import textwrap
import time
import yaml
@@ -23,7 +25,6 @@ from ansible.galaxy import Galaxy, get_collections_galaxy_meta_info
from ansible.galaxy.api import GalaxyAPI
from ansible.galaxy.collection import (
build_collection,
- CollectionRequirement,
download_collections,
find_existing_collections,
install_collections,
@@ -32,6 +33,10 @@ from ansible.galaxy.collection import (
validate_collection_path,
verify_collections
)
+from ansible.galaxy.collection.concrete_artifact_manager import (
+ ConcreteArtifactsManager,
+)
+from ansible.galaxy.dependency_resolution.dataclasses import Requirement
from ansible.galaxy.role import GalaxyRole
from ansible.galaxy.token import BasicAuthToken, GalaxyToken, KeycloakToken, NoTokenSentinel
@@ -51,6 +56,26 @@ display = Display()
urlparse = six.moves.urllib.parse.urlparse
+def with_collection_artifacts_manager(wrapped_method):
+ """Inject an artifacts manager if not passed explicitly.
+
+ This decorator constructs a ConcreteArtifactsManager and maintains
+ the related temporary directory auto-cleanup around the target
+ method invocation.
+ """
+ def method_wrapper(*args, **kwargs):
+ if 'artifacts_manager' in kwargs:
+ return wrapped_method(*args, **kwargs)
+
+ with ConcreteArtifactsManager.under_tmpdir(
+ C.DEFAULT_LOCAL_TMP,
+ validate_certs=not context.CLIARGS['ignore_certs'],
+ ) as concrete_artifact_cm:
+ kwargs['artifacts_manager'] = concrete_artifact_cm
+ return wrapped_method(*args, **kwargs)
+ return method_wrapper
+
+
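As a rough, self-contained illustration of the injection pattern this decorator implements (the helper and method names below are hypothetical stand-ins, not ansible-galaxy code):

    from contextlib import contextmanager

    class DummyManager:
        @staticmethod
        @contextmanager
        def under_tmpdir(tmp_root, validate_certs=True):
            mgr = DummyManager()   # stands in for ConcreteArtifactsManager
            try:
                yield mgr          # the manager is live for the wrapped call
            finally:
                pass               # temporary-directory cleanup would happen here

    def with_manager(wrapped):
        def wrapper(*args, **kwargs):
            if 'artifacts_manager' in kwargs:      # caller supplied one explicitly
                return wrapped(*args, **kwargs)
            with DummyManager.under_tmpdir('/tmp') as mgr:
                kwargs['artifacts_manager'] = mgr  # inject it otherwise
                return wrapped(*args, **kwargs)
        return wrapper

    @with_manager
    def execute_something(artifacts_manager=None):
        return artifacts_manager is not None

    assert execute_something() is True   # manager was injected automatically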
def _display_header(path, h1, h2, w1=10, w2=7):
display.display('\n# {0}\n{1:{cwidth}} {2:{vwidth}}\n{3} {4}\n'.format(
path,
@@ -75,20 +100,19 @@ def _display_role(gr):
def _display_collection(collection, cwidth=10, vwidth=7, min_cwidth=10, min_vwidth=7):
display.display('{fqcn:{cwidth}} {version:{vwidth}}'.format(
- fqcn=to_text(collection),
- version=collection.latest_version,
+ fqcn=to_text(collection.fqcn),
+ version=collection.ver,
cwidth=max(cwidth, min_cwidth), # Make sure the width isn't smaller than the header
vwidth=max(vwidth, min_vwidth)
))
def _get_collection_widths(collections):
- if is_iterable(collections):
- fqcn_set = set(to_text(c) for c in collections)
- version_set = set(to_text(c.latest_version) for c in collections)
- else:
- fqcn_set = set([to_text(collections)])
- version_set = set([collections.latest_version])
+ if not is_iterable(collections):
+ collections = (collections, )
+
+ fqcn_set = {to_text(c.fqcn) for c in collections}
+ version_set = {to_text(c.ver) for c in collections}
fqcn_length = len(max(fqcn_set, key=len))
version_length = len(max(version_set, key=len))
@@ -120,10 +144,11 @@ class GalaxyCLI(CLI):
"to Galaxy. The key can be found at https://galaxy.ansible.com/me/preferences, and passed to the "
"ansible-galaxy CLI via a file at {0} or (insecurely) via the `--token` "
"command-line argument.".format(to_text(C.GALAXY_TOKEN_PATH)))
- exit(1)
+ sys.exit(1)
self.api_servers = []
self.galaxy = None
+ self._api = None
super(GalaxyCLI, self).__init__(args)
def init_parser(self):
@@ -163,13 +188,19 @@ class GalaxyCLI(CLI):
'writable one configured via DEFAULT_ROLES_PATH: %s ' % default_roles_path)
collections_path = opt_help.argparse.ArgumentParser(add_help=False)
- collections_path.add_argument('-p', '--collection-path', dest='collections_path', type=opt_help.unfrack_path(pathsep=True),
+ collections_path.add_argument('-p', '--collections-path', dest='collections_path', type=opt_help.unfrack_path(pathsep=True),
default=AnsibleCollectionConfig.collection_paths,
action=opt_help.PrependListAction,
help="One or more directories to search for collections in addition "
"to the default COLLECTIONS_PATHS. Separate multiple paths "
"with '{0}'.".format(os.path.pathsep))
+ cache_options = opt_help.argparse.ArgumentParser(add_help=False)
+ cache_options.add_argument('--clear-response-cache', dest='clear_response_cache', action='store_true',
+ default=False, help='Clear the existing server response cache.')
+ cache_options.add_argument('--no-cache', dest='no_cache', action='store_true', default=False,
+ help='Do not use the server response cache.')
+
# Add sub parser for the Galaxy role type (role or collection)
type_parser = self.parser.add_subparsers(metavar='TYPE', dest='type')
type_parser.required = True
@@ -178,11 +209,11 @@ class GalaxyCLI(CLI):
collection = type_parser.add_parser('collection', help='Manage an Ansible Galaxy collection.')
collection_parser = collection.add_subparsers(metavar='COLLECTION_ACTION', dest='action')
collection_parser.required = True
- self.add_download_options(collection_parser, parents=[common])
+ self.add_download_options(collection_parser, parents=[common, cache_options])
self.add_init_options(collection_parser, parents=[common, force])
self.add_build_options(collection_parser, parents=[common, force])
self.add_publish_options(collection_parser, parents=[common])
- self.add_install_options(collection_parser, parents=[common, force])
+ self.add_install_options(collection_parser, parents=[common, force, cache_options])
self.add_list_options(collection_parser, parents=[common, collections_path])
self.add_verify_options(collection_parser, parents=[common, collections_path])
@@ -271,6 +302,10 @@ class GalaxyCLI(CLI):
list_parser.add_argument(galaxy_type, help=galaxy_type.capitalize(), nargs='?', metavar=galaxy_type)
+ if galaxy_type == 'collection':
+ list_parser.add_argument('--format', dest='output_format', choices=('human', 'yaml', 'json'), default='human',
+ help="Format to display the list of collections in.")
+
def add_search_options(self, parser, parents=None):
search_parser = parser.add_parser('search', parents=parents,
help='Search the Galaxy database by tags, platforms, author and multiple '
@@ -328,6 +363,9 @@ class GalaxyCLI(CLI):
'path/url to a tar.gz collection artifact. This is mutually exclusive with --requirements-file.')
verify_parser.add_argument('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
help='Ignore errors during verification and continue with the next specified collection.')
+ verify_parser.add_argument('--offline', dest='offline', action='store_true', default=False,
+ help='Validate collection integrity locally without contacting server for '
+ 'canonical manifest hash.')
verify_parser.add_argument('-r', '--requirements-file', dest='requirements',
help='A file containing a list of collections to be verified.')
@@ -368,6 +406,8 @@ class GalaxyCLI(CLI):
help='A file containing a list of collections to be installed.')
install_parser.add_argument('--pre', dest='allow_pre_release', action='store_true',
help='Include pre-release versions. Semantic versioning pre-releases are ignored by default')
+ install_parser.add_argument('-U', '--upgrade', dest='upgrade', action='store_true', default=False,
+ help='Upgrade installed collection artifacts. This will also update dependencies unless --no-deps is provided')
else:
install_parser.add_argument('-r', '--role-file', dest='requirements',
help='A file containing a list of roles to be installed.')
@@ -377,7 +417,7 @@ class GalaxyCLI(CLI):
def add_build_options(self, parser, parents=None):
build_parser = parser.add_parser('build', parents=parents,
- help='Build an Ansible collection artifact that can be publish to Ansible '
+ help='Build an Ansible collection artifact that can be published to Ansible '
'Galaxy.')
build_parser.set_defaults(func=self.execute_build)
@@ -427,15 +467,19 @@ class GalaxyCLI(CLI):
'required': required,
}
server_def = [('url', True), ('username', False), ('password', False), ('token', False),
- ('auth_url', False)]
+ ('auth_url', False), ('v3', False)]
validate_certs = not context.CLIARGS['ignore_certs']
+ galaxy_options = {'validate_certs': validate_certs}
+ for optional_key in ['clear_response_cache', 'no_cache']:
+ if optional_key in context.CLIARGS:
+ galaxy_options[optional_key] = context.CLIARGS[optional_key]
config_servers = []
# Need to filter out empty strings or non truthy values as an empty server list env var is equal to [''].
server_list = [s for s in C.GALAXY_SERVER_LIST or [] if s]
- for server_key in server_list:
+ for server_priority, server_key in enumerate(server_list, start=1):
# Config definitions are looked up dynamically based on the C.GALAXY_SERVER_LIST entry. We look up the
# section [galaxy_server.<server>] for the values url, username, password, and token.
config_dict = dict((k, server_config_def(server_key, k, req)) for k, req in server_def)
@@ -448,6 +492,14 @@ class GalaxyCLI(CLI):
auth_url = server_options.pop('auth_url', None)
token_val = server_options['token'] or NoTokenSentinel
username = server_options['username']
+ available_api_versions = None
+ v3 = server_options.pop('v3', None)
+ if v3:
+ # This allows a user to explicitly indicate the server uses the /v3 API
+ # This was added for testing against pulp_ansible and I'm not sure it has
+ # a practical purpose outside of this use case. As such, this option is not
+ # documented as of now
+ server_options['available_api_versions'] = {'v3': '/v3'}
# default case if no auth info is provided.
server_options['token'] = None
@@ -465,9 +517,12 @@ class GalaxyCLI(CLI):
# The galaxy v1 / github / django / 'Token'
server_options['token'] = GalaxyToken(token=token_val)
- server_options['validate_certs'] = validate_certs
-
- config_servers.append(GalaxyAPI(self.galaxy, server_key, **server_options))
+ server_options.update(galaxy_options)
+ config_servers.append(GalaxyAPI(
+ self.galaxy, server_key,
+ priority=server_priority,
+ **server_options
+ ))
cmd_server = context.CLIARGS['api_server']
cmd_token = GalaxyToken(token=context.CLIARGS['api_key'])
@@ -478,26 +533,46 @@ class GalaxyCLI(CLI):
if config_server:
self.api_servers.append(config_server)
else:
- self.api_servers.append(GalaxyAPI(self.galaxy, 'cmd_arg', cmd_server, token=cmd_token,
- validate_certs=validate_certs))
+ self.api_servers.append(GalaxyAPI(
+ self.galaxy, 'cmd_arg', cmd_server, token=cmd_token,
+ priority=len(config_servers) + 1,
+ **galaxy_options
+ ))
else:
self.api_servers = config_servers
# Default to C.GALAXY_SERVER if no servers were defined
if len(self.api_servers) == 0:
- self.api_servers.append(GalaxyAPI(self.galaxy, 'default', C.GALAXY_SERVER, token=cmd_token,
- validate_certs=validate_certs))
+ self.api_servers.append(GalaxyAPI(
+ self.galaxy, 'default', C.GALAXY_SERVER, token=cmd_token,
+ priority=0,
+ **galaxy_options
+ ))
- context.CLIARGS['func']()
+ return context.CLIARGS['func']()
@property
def api(self):
- return self.api_servers[0]
+ if self._api:
+ return self._api
+
+ for server in self.api_servers:
+ try:
+ if u'v1' in server.available_api_versions:
+ self._api = server
+ break
+ except Exception:
+ continue
+
+ if not self._api:
+ self._api = self.api_servers[0]
+
+ return self._api
def _get_default_collection_path(self):
return C.COLLECTIONS_PATHS[0]
- def _parse_requirements_file(self, requirements_file, allow_old_format=True):
+ def _parse_requirements_file(self, requirements_file, allow_old_format=True, artifacts_manager=None):
"""
Parses an Ansible requirements.yml file and returns all the roles and/or collections defined in it. There are 2
requirements file formats:
@@ -523,6 +598,7 @@ class GalaxyCLI(CLI):
:param requirements_file: The path to the requirements file.
:param allow_old_format: Will fail if a v1 requirements file is found and this is set to False.
+ :param artifacts_manager: Artifacts manager.
:return: a dict containing the roles and collections found in the requirements file.
"""
requirements = {
@@ -586,33 +662,48 @@ class GalaxyCLI(CLI):
for role_req in file_requirements.get('roles') or []:
requirements['roles'] += parse_role_req(role_req)
- for collection_req in file_requirements.get('collections') or []:
- if isinstance(collection_req, dict):
- req_name = collection_req.get('name', None)
- if req_name is None:
- raise AnsibleError("Collections requirement entry should contain the key name.")
-
- req_type = collection_req.get('type')
- if req_type not in ('file', 'galaxy', 'git', 'url', None):
- raise AnsibleError("The collection requirement entry key 'type' must be one of file, galaxy, git, or url.")
-
- req_version = collection_req.get('version', '*')
- req_source = collection_req.get('source', None)
- if req_source:
- # Try and match up the requirement source with our list of Galaxy API servers defined in the
- # config, otherwise create a server with that URL without any auth.
- req_source = next(iter([a for a in self.api_servers if req_source in [a.name, a.api_server]]),
- GalaxyAPI(self.galaxy,
- "explicit_requirement_%s" % req_name,
- req_source,
- validate_certs=not context.CLIARGS['ignore_certs']))
-
- requirements['collections'].append((req_name, req_version, req_source, req_type))
- else:
- requirements['collections'].append((collection_req, '*', None, None))
+ requirements['collections'] = [
+ Requirement.from_requirement_dict(
+ self._init_coll_req_dict(collection_req),
+ artifacts_manager,
+ )
+ for collection_req in file_requirements.get('collections') or []
+ ]
return requirements
+ def _init_coll_req_dict(self, coll_req):
+ if not isinstance(coll_req, dict):
+ # Assume it's a string:
+ return {'name': coll_req}
+
+ if (
+ 'name' not in coll_req or
+ not coll_req.get('source') or
+ coll_req.get('type', 'galaxy') != 'galaxy'
+ ):
+ return coll_req
+
+ # Try and match up the requirement source with our list of Galaxy API
+ # servers defined in the config, otherwise create a server with that
+ # URL without any auth.
+ coll_req['source'] = next(
+ iter(
+ srvr for srvr in self.api_servers
+ if coll_req['source'] in {srvr.name, srvr.api_server}
+ ),
+ GalaxyAPI(
+ self.galaxy,
+ 'explicit_requirement_{name!s}'.format(
+ name=coll_req['name'],
+ ),
+ coll_req['source'],
+ validate_certs=not context.CLIARGS['ignore_certs'],
+ ),
+ )
+
+ return coll_req
+
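For illustration, a minimal sketch of the string-to-dict normalization performed above (the collection names and types are made up):

    def normalize(coll_req):
        if not isinstance(coll_req, dict):
            # a bare string becomes a minimal requirement dict
            return {'name': coll_req}
        return coll_req

    assert normalize('community.general') == {'name': 'community.general'}
    assert normalize({'name': 'ns.coll', 'type': 'git'}) == {'name': 'ns.coll', 'type': 'git'}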
@staticmethod
def exit_without_ignore(rc=1):
"""
@@ -646,6 +737,8 @@ class GalaxyCLI(CLI):
else:
text.append(u"\t%s: %s" % (k, role_info[k]))
+ # make sure we have a trailing newline returned
+ text.append(u"")
return u'\n'.join(text)
@staticmethod
@@ -698,26 +791,29 @@ class GalaxyCLI(CLI):
return meta_value
- def _require_one_of_collections_requirements(self, collections, requirements_file):
+ def _require_one_of_collections_requirements(
+ self, collections, requirements_file,
+ artifacts_manager=None,
+ ):
if collections and requirements_file:
raise AnsibleError("The positional collection_name arg and --requirements-file are mutually exclusive.")
elif not collections and not requirements_file:
raise AnsibleError("You must specify a collection name or a requirements file.")
elif requirements_file:
requirements_file = GalaxyCLI._resolve_path(requirements_file)
- requirements = self._parse_requirements_file(requirements_file, allow_old_format=False)
+ requirements = self._parse_requirements_file(
+ requirements_file,
+ allow_old_format=False,
+ artifacts_manager=artifacts_manager,
+ )
else:
- requirements = {'collections': [], 'roles': []}
- for collection_input in collections:
- requirement = None
- if os.path.isfile(to_bytes(collection_input, errors='surrogate_or_strict')) or \
- urlparse(collection_input).scheme.lower() in ['http', 'https'] or \
- collection_input.startswith(('git+', 'git@')):
- # Arg is a file path or URL to a collection
- name = collection_input
- else:
- name, dummy, requirement = collection_input.partition(':')
- requirements['collections'].append((name, requirement or '*', None, None))
+ requirements = {
+ 'collections': [
+ Requirement.from_string(coll_input, artifacts_manager)
+ for coll_input in collections
+ ],
+ 'roles': [],
+ }
return requirements
############################
@@ -757,27 +853,37 @@ class GalaxyCLI(CLI):
for collection_path in context.CLIARGS['args']:
collection_path = GalaxyCLI._resolve_path(collection_path)
- build_collection(collection_path, output_path, force)
+ build_collection(
+ to_text(collection_path, errors='surrogate_or_strict'),
+ to_text(output_path, errors='surrogate_or_strict'),
+ force,
+ )
- def execute_download(self):
+ @with_collection_artifacts_manager
+ def execute_download(self, artifacts_manager=None):
collections = context.CLIARGS['args']
no_deps = context.CLIARGS['no_deps']
download_path = context.CLIARGS['download_path']
- ignore_certs = context.CLIARGS['ignore_certs']
requirements_file = context.CLIARGS['requirements']
if requirements_file:
requirements_file = GalaxyCLI._resolve_path(requirements_file)
- requirements = self._require_one_of_collections_requirements(collections, requirements_file)['collections']
+ requirements = self._require_one_of_collections_requirements(
+ collections, requirements_file,
+ artifacts_manager=artifacts_manager,
+ )['collections']
download_path = GalaxyCLI._resolve_path(download_path)
b_download_path = to_bytes(download_path, errors='surrogate_or_strict')
if not os.path.exists(b_download_path):
os.makedirs(b_download_path)
- download_collections(requirements, download_path, self.api_servers, (not ignore_certs), no_deps,
- context.CLIARGS['allow_pre_release'])
+ download_collections(
+ requirements, download_path, self.api_servers, no_deps,
+ context.CLIARGS['allow_pre_release'],
+ artifacts_manager=artifacts_manager,
+ )
return 0
@@ -965,33 +1071,47 @@ class GalaxyCLI(CLI):
if role_spec:
role_info.update(role_spec)
- data = self._display_role_info(role_info)
+ data += self._display_role_info(role_info)
self.pager(data)
- def execute_verify(self):
+ @with_collection_artifacts_manager
+ def execute_verify(self, artifacts_manager=None):
collections = context.CLIARGS['args']
search_paths = context.CLIARGS['collections_path']
- ignore_certs = context.CLIARGS['ignore_certs']
ignore_errors = context.CLIARGS['ignore_errors']
+ local_verify_only = context.CLIARGS['offline']
requirements_file = context.CLIARGS['requirements']
- requirements = self._require_one_of_collections_requirements(collections, requirements_file)['collections']
+ requirements = self._require_one_of_collections_requirements(
+ collections, requirements_file,
+ artifacts_manager=artifacts_manager,
+ )['collections']
resolved_paths = [validate_collection_path(GalaxyCLI._resolve_path(path)) for path in search_paths]
- verify_collections(requirements, resolved_paths, self.api_servers, (not ignore_certs), ignore_errors,
- allow_pre_release=True)
+ results = verify_collections(
+ requirements, resolved_paths,
+ self.api_servers, ignore_errors,
+ local_verify_only=local_verify_only,
+ artifacts_manager=artifacts_manager,
+ )
+
+ if any(result for result in results if not result.success):
+ return 1
return 0
- def execute_install(self):
+ @with_collection_artifacts_manager
+ def execute_install(self, artifacts_manager=None):
"""
Install one or more roles (``ansible-galaxy role install``), or one or more collections (``ansible-galaxy collection install``).
You can pass in a list (roles or collections) or use the file
option listed below (these are mutually exclusive). If you pass in a list, it
can be a name (which will be downloaded via the galaxy API and github), or it can be a local tar archive file.
+
+ :param artifacts_manager: Artifacts manager.
"""
install_items = context.CLIARGS['args']
requirements_file = context.CLIARGS['requirements']
@@ -1009,7 +1129,10 @@ class GalaxyCLI(CLI):
role_requirements = []
if context.CLIARGS['type'] == 'collection':
collection_path = GalaxyCLI._resolve_path(context.CLIARGS['collections_path'])
- requirements = self._require_one_of_collections_requirements(install_items, requirements_file)
+ requirements = self._require_one_of_collections_requirements(
+ install_items, requirements_file,
+ artifacts_manager=artifacts_manager,
+ )
collection_requirements = requirements['collections']
if requirements['roles']:
@@ -1022,7 +1145,10 @@ class GalaxyCLI(CLI):
if not (requirements_file.endswith('.yaml') or requirements_file.endswith('.yml')):
raise AnsibleError("Invalid role requirements file, it must end with a .yml or .yaml extension")
- requirements = self._parse_requirements_file(requirements_file)
+ requirements = self._parse_requirements_file(
+ requirements_file,
+ artifacts_manager=artifacts_manager,
+ )
role_requirements = requirements['roles']
# We can only install collections and roles at the same time if the type wasn't specified and the -p
@@ -1057,15 +1183,21 @@ class GalaxyCLI(CLI):
display.display("Starting galaxy collection install process")
# Collections can technically be installed even when ansible-galaxy is in role mode so we need to pass in
# the install path as context.CLIARGS['collections_path'] won't be set (default is calculated above).
- self._execute_install_collection(collection_requirements, collection_path)
+ self._execute_install_collection(
+ collection_requirements, collection_path,
+ artifacts_manager=artifacts_manager,
+ )
- def _execute_install_collection(self, requirements, path):
+ def _execute_install_collection(
+ self, requirements, path, artifacts_manager,
+ ):
force = context.CLIARGS['force']
- ignore_certs = context.CLIARGS['ignore_certs']
ignore_errors = context.CLIARGS['ignore_errors']
no_deps = context.CLIARGS['no_deps']
force_with_deps = context.CLIARGS['force_with_deps']
- allow_pre_release = context.CLIARGS['allow_pre_release'] if 'allow_pre_release' in context.CLIARGS else False
+ # If `ansible-galaxy install` is used, collection-only options aren't available to the user and won't be in context.CLIARGS
+ allow_pre_release = context.CLIARGS.get('allow_pre_release', False)
+ upgrade = context.CLIARGS.get('upgrade', False)
collections_path = C.COLLECTIONS_PATHS
if len([p for p in collections_path if p.startswith(path)]) == 0:
@@ -1078,8 +1210,12 @@ class GalaxyCLI(CLI):
if not os.path.exists(b_output_path):
os.makedirs(b_output_path)
- install_collections(requirements, output_path, self.api_servers, (not ignore_certs), ignore_errors,
- no_deps, force, force_with_deps, allow_pre_release=allow_pre_release)
+ install_collections(
+ requirements, output_path, self.api_servers, ignore_errors,
+ no_deps, force, force_with_deps, upgrade,
+ allow_pre_release=allow_pre_release,
+ artifacts_manager=artifacts_manager,
+ )
return 0
@@ -1250,14 +1386,19 @@ class GalaxyCLI(CLI):
return 0
- def execute_list_collection(self):
+ @with_collection_artifacts_manager
+ def execute_list_collection(self, artifacts_manager=None):
"""
List all collections installed on the local system
+
+ :param artifacts_manager: Artifacts manager.
"""
+ output_format = context.CLIARGS['output_format']
collections_search_paths = set(context.CLIARGS['collections_path'])
collection_name = context.CLIARGS['collection']
default_collections_path = AnsibleCollectionConfig.collection_paths
+ collections_in_paths = {}
warnings = []
path_found = False
@@ -1295,8 +1436,23 @@ class GalaxyCLI(CLI):
continue
collection_found = True
- collection = CollectionRequirement.from_path(b_collection_path, False, fallback_metadata=True)
- fqcn_width, version_width = _get_collection_widths(collection)
+
+ try:
+ collection = Requirement.from_dir_path_as_unknown(
+ b_collection_path,
+ artifacts_manager,
+ )
+ except ValueError as val_err:
+ six.raise_from(AnsibleError(val_err), val_err)
+
+ if output_format in {'yaml', 'json'}:
+ collections_in_paths[collection_path] = {
+ collection.fqcn: {'version': collection.ver}
+ }
+
+ continue
+
+ fqcn_width, version_width = _get_collection_widths([collection])
_display_header(collection_path, 'Collection', 'Version', fqcn_width, version_width)
_display_collection(collection, fqcn_width, version_width)
@@ -1306,7 +1462,9 @@ class GalaxyCLI(CLI):
collection_path = validate_collection_path(path)
if os.path.isdir(collection_path):
display.vvv("Searching {0} for collections".format(collection_path))
- collections = find_existing_collections(collection_path, fallback_metadata=True)
+ collections = list(find_existing_collections(
+ collection_path, artifacts_manager,
+ ))
else:
# There was no 'ansible_collections/' directory in the path, so there
# are no collections here.
@@ -1317,13 +1475,19 @@ class GalaxyCLI(CLI):
display.vvv("No collections found at {0}".format(collection_path))
continue
+ if output_format in {'yaml', 'json'}:
+ collections_in_paths[collection_path] = {
+ collection.fqcn: {'version': collection.ver} for collection in collections
+ }
+
+ continue
+
# Display header
fqcn_width, version_width = _get_collection_widths(collections)
_display_header(collection_path, 'Collection', 'Version', fqcn_width, version_width)
# Sort collections by the namespace and name
- collections.sort(key=to_text)
- for collection in collections:
+ for collection in sorted(collections, key=to_text):
_display_collection(collection, fqcn_width, version_width)
# Do not warn if the specific collection was found in any of the search paths
@@ -1336,6 +1500,11 @@ class GalaxyCLI(CLI):
if not path_found:
raise AnsibleOptionsError("- None of the provided paths were usable. Please specify a valid path with --{0}s-path".format(context.CLIARGS['type']))
+ if output_format == 'json':
+ display.display(json.dumps(collections_in_paths))
+ elif output_format == 'yaml':
+ display.display(yaml.safe_dump(collections_in_paths))
+
return 0
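For illustration, a minimal sketch of the structure serialized for --format json or yaml (the path and versions below are hypothetical):

    import json

    # one entry per collections path, mapping each FQCN to its installed version
    collections_in_paths = {
        '/home/user/.ansible/collections/ansible_collections': {
            'community.general': {'version': '2.0.1'},
            'ansible.posix': {'version': '1.1.1'},
        },
    }
    print(json.dumps(collections_in_paths))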
def execute_publish(self):
diff --git a/lib/ansible/cli/inventory.py b/lib/ansible/cli/inventory.py
index 58df4a5a..dc461577 100644
--- a/lib/ansible/cli/inventory.py
+++ b/lib/ansible/cli/inventory.py
@@ -15,7 +15,7 @@ from ansible import context
from ansible.cli import CLI
from ansible.cli.arguments import option_helpers as opt_help
from ansible.errors import AnsibleError, AnsibleOptionsError
-from ansible.module_utils._text import to_bytes, to_native
+from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.utils.vars import combine_vars
from ansible.utils.display import Display
from ansible.vars.plugins import get_vars_from_inventory_sources, get_vars_from_path
@@ -64,6 +64,7 @@ class InventoryCLI(CLI):
opt_help.add_inventory_options(self.parser)
opt_help.add_vault_options(self.parser)
opt_help.add_basedir_options(self.parser)
+ opt_help.add_runtask_options(self.parser)
# remove unused default options
self.parser.add_argument('-l', '--limit', help=argparse.SUPPRESS, action=opt_help.UnrecognizedArgument, nargs='?')
@@ -157,8 +158,8 @@ class InventoryCLI(CLI):
display.display(results)
else:
try:
- with open(to_bytes(outfile), 'wt') as f:
- f.write(results)
+ with open(to_bytes(outfile), 'wb') as f:
+ f.write(to_bytes(results))
except (OSError, IOError) as e:
raise AnsibleError('Unable to write to destination file (%s): %s' % (to_native(outfile), to_native(e)))
sys.exit(0)
@@ -171,7 +172,7 @@ class InventoryCLI(CLI):
if context.CLIARGS['yaml']:
import yaml
from ansible.parsing.yaml.dumper import AnsibleDumper
- results = yaml.dump(stuff, Dumper=AnsibleDumper, default_flow_style=False)
+ results = to_text(yaml.dump(stuff, Dumper=AnsibleDumper, default_flow_style=False, allow_unicode=True))
elif context.CLIARGS['toml']:
from ansible.plugins.inventory.toml import toml_dumps, HAS_TOML
if not HAS_TOML:
@@ -183,9 +184,9 @@ class InventoryCLI(CLI):
import json
from ansible.parsing.ajson import AnsibleJSONEncoder
try:
- results = json.dumps(stuff, cls=AnsibleJSONEncoder, sort_keys=True, indent=4, preprocess_unsafe=True)
+ results = json.dumps(stuff, cls=AnsibleJSONEncoder, sort_keys=True, indent=4, preprocess_unsafe=True, ensure_ascii=False)
except TypeError as e:
- results = json.dumps(stuff, cls=AnsibleJSONEncoder, sort_keys=False, indent=4, preprocess_unsafe=True)
+ results = json.dumps(stuff, cls=AnsibleJSONEncoder, sort_keys=False, indent=4, preprocess_unsafe=True, ensure_ascii=False)
display.warning("Could not sort JSON output due to issues while sorting keys: %s" % to_native(e))
return results
diff --git a/lib/ansible/cli/playbook.py b/lib/ansible/cli/playbook.py
index f8246b2a..c3ec8321 100644
--- a/lib/ansible/cli/playbook.py
+++ b/lib/ansible/cli/playbook.py
@@ -16,10 +16,10 @@ from ansible.errors import AnsibleError
from ansible.executor.playbook_executor import PlaybookExecutor
from ansible.module_utils._text import to_bytes
from ansible.playbook.block import Block
-from ansible.utils.display import Display
-from ansible.utils.collection_loader import AnsibleCollectionConfig
-from ansible.utils.collection_loader._collection_finder import _get_collection_name_from_path
from ansible.plugins.loader import add_all_plugin_dirs
+from ansible.utils.collection_loader import AnsibleCollectionConfig
+from ansible.utils.collection_loader._collection_finder import _get_collection_name_from_path, _get_collection_playbook_path
+from ansible.utils.display import Display
display = Display()
@@ -78,27 +78,35 @@ class PlaybookCLI(CLI):
# initial error check, to make sure all specified playbooks are accessible
# before we start running anything through the playbook executor
-
+ # also prep plugin paths
b_playbook_dirs = []
for playbook in context.CLIARGS['args']:
- if not os.path.exists(playbook):
- raise AnsibleError("the playbook: %s could not be found" % playbook)
- if not (os.path.isfile(playbook) or stat.S_ISFIFO(os.stat(playbook).st_mode)):
- raise AnsibleError("the playbook: %s does not appear to be a file" % playbook)
- b_playbook_dir = os.path.dirname(os.path.abspath(to_bytes(playbook, errors='surrogate_or_strict')))
- # load plugins from all playbooks in case they add callbacks/inventory/etc
- add_all_plugin_dirs(b_playbook_dir)
-
- b_playbook_dirs.append(b_playbook_dir)
-
- AnsibleCollectionConfig.playbook_paths = b_playbook_dirs
-
- playbook_collection = _get_collection_name_from_path(b_playbook_dirs[0])
-
- if playbook_collection:
- display.warning("running playbook inside collection {0}".format(playbook_collection))
- AnsibleCollectionConfig.default_collection = playbook_collection
+ # resolve if it is a collection playbook in FQCN notation; if not, leave it unchanged
+ resource = _get_collection_playbook_path(playbook)
+ if resource is not None:
+ playbook_collection = resource[2]
+ else:
+ # not an FQCN so must be a file
+ if not os.path.exists(playbook):
+ raise AnsibleError("the playbook: %s could not be found" % playbook)
+ if not (os.path.isfile(playbook) or stat.S_ISFIFO(os.stat(playbook).st_mode)):
+ raise AnsibleError("the playbook: %s does not appear to be a file" % playbook)
+
+ # check if playbook is from collection (path can be passed directly)
+ playbook_collection = _get_collection_name_from_path(playbook)
+
+ # don't add collection playbooks to adjacency search path
+ if not playbook_collection:
+ # setup dirs to enable loading plugins from all playbooks in case they add callbacks/inventory/etc
+ b_playbook_dir = os.path.dirname(os.path.abspath(to_bytes(playbook, errors='surrogate_or_strict')))
+ add_all_plugin_dirs(b_playbook_dir)
+ b_playbook_dirs.append(b_playbook_dir)
+
+ if b_playbook_dirs:
+ # allow collections adjacent to these playbooks
+ # we use list copy to avoid opening up 'adjacency' in the previous loop
+ AnsibleCollectionConfig.playbook_paths = b_playbook_dirs
# don't deal with privilege escalation or passwords when we don't need to
if not (context.CLIARGS['listhosts'] or context.CLIARGS['listtasks'] or
@@ -139,7 +147,13 @@ class PlaybookCLI(CLI):
pb_dir = os.path.realpath(os.path.dirname(p['playbook']))
loader.set_basedir(pb_dir)
- msg = "\n play #%d (%s): %s" % (idx + 1, ','.join(play.hosts), play.name)
+ # show host list if we were able to template into a list
+ try:
+ host_list = ','.join(play.hosts)
+ except TypeError:
+ host_list = ''
+
+ msg = "\n play #%d (%s): %s" % (idx + 1, host_list, play.name)
mytags = set(play.tags)
msg += '\tTAGS: [%s]' % (','.join(mytags))
@@ -163,7 +177,7 @@ class PlaybookCLI(CLI):
if isinstance(task, Block):
taskmsg += _process_block(task)
else:
- if task.action in C._ACTION_META:
+ if task.action in C._ACTION_META and task.implicit:
continue
all_tags.update(task.tags)
diff --git a/lib/ansible/cli/pull.py b/lib/ansible/cli/pull.py
index 55b5a2b0..0ebd431b 100644
--- a/lib/ansible/cli/pull.py
+++ b/lib/ansible/cli/pull.py
@@ -94,7 +94,8 @@ class PullCLI(CLI):
'This is a useful way to disperse git requests')
self.parser.add_argument('-f', '--force', dest='force', default=False, action='store_true',
help='run the playbook even if the repository could not be updated')
- self.parser.add_argument('-d', '--directory', dest='dest', default=None, help='directory to checkout repository to')
+ self.parser.add_argument('-d', '--directory', dest='dest', default=None,
+ help='absolute path of repository checkout directory (relative paths are not supported)')
self.parser.add_argument('-U', '--url', dest='url', default=None, help='URL of the playbook repository')
self.parser.add_argument('--full', dest='fullclone', action='store_true', help='Do a full clone, instead of a shallow one.')
self.parser.add_argument('-C', '--checkout', dest='checkout',
@@ -311,19 +312,26 @@ class PullCLI(CLI):
@staticmethod
def select_playbook(path):
playbook = None
+ errors = []
if context.CLIARGS['args'] and context.CLIARGS['args'][0] is not None:
- playbook = os.path.join(path, context.CLIARGS['args'][0])
- rc = PullCLI.try_playbook(playbook)
- if rc != 0:
- display.warning("%s: %s" % (playbook, PullCLI.PLAYBOOK_ERRORS[rc]))
- return None
+ playbooks = []
+ for book in context.CLIARGS['args']:
+ book_path = os.path.join(path, book)
+ rc = PullCLI.try_playbook(book_path)
+ if rc != 0:
+ errors.append("%s: %s" % (book_path, PullCLI.PLAYBOOK_ERRORS[rc]))
+ continue
+ playbooks.append(book_path)
+ if 0 < len(errors):
+ display.warning("\n".join(errors))
+ elif len(playbooks) == len(context.CLIARGS['args']):
+ playbook = " ".join(playbooks)
return playbook
else:
fqdn = socket.getfqdn()
hostpb = os.path.join(path, fqdn + '.yml')
shorthostpb = os.path.join(path, fqdn.split('.')[0] + '.yml')
localpb = os.path.join(path, PullCLI.DEFAULT_PLAYBOOK)
- errors = []
for pb in [hostpb, shorthostpb, localpb]:
rc = PullCLI.try_playbook(pb)
if rc == 0:
diff --git a/lib/ansible/cli/scripts/ansible_cli_stub.py b/lib/ansible/cli/scripts/ansible_cli_stub.py
index 2ede010e..f82c47b7 100755
--- a/lib/ansible/cli/scripts/ansible_cli_stub.py
+++ b/lib/ansible/cli/scripts/ansible_cli_stub.py
@@ -22,7 +22,7 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-__requires__ = ['ansible_base']
+__requires__ = ['ansible_core']
import errno
@@ -38,6 +38,7 @@ from ansible.module_utils._text import to_text
# Used for determining if the system is running a new enough python version
# and should only restrict on our documented minimum versions
+_PY38_MIN = sys.version_info[:2] >= (3, 8)
_PY3_MIN = sys.version_info[:2] >= (3, 5)
_PY2_MIN = (2, 6) <= sys.version_info[:2] < (3,)
_PY_MIN = _PY3_MIN or _PY2_MIN
@@ -60,16 +61,27 @@ if __name__ == '__main__':
try: # bad ANSIBLE_CONFIG or config options can force ugly stacktrace
import ansible.constants as C
- from ansible.utils.display import Display
+ from ansible.utils.display import Display, initialize_locale
except AnsibleOptionsError as e:
display.error(to_text(e), wrap_text=False)
sys.exit(5)
+ initialize_locale()
+
cli = None
me = os.path.basename(sys.argv[0])
try:
display = Display()
+ if C.CONTROLLER_PYTHON_WARNING and not _PY38_MIN:
+ display.deprecated(
+ (
+ 'Ansible will require Python 3.8 or newer on the controller starting with Ansible 2.12. '
+ 'Current version: %s' % ''.join(sys.version.splitlines())
+ ),
+ version='2.12',
+ collection_name='ansible.builtin',
+ )
display.debug("starting run")
sub = None
diff --git a/lib/ansible/cli/scripts/ansible_connection_cli_stub.py b/lib/ansible/cli/scripts/ansible_connection_cli_stub.py
index d701f156..1b85c1c3 100755
--- a/lib/ansible/cli/scripts/ansible_connection_cli_stub.py
+++ b/lib/ansible/cli/scripts/ansible_connection_cli_stub.py
@@ -4,7 +4,7 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-__requires__ = ['ansible_base']
+__requires__ = ['ansible_core']
import fcntl
diff --git a/lib/ansible/cli/vault.py b/lib/ansible/cli/vault.py
index 0425a1a3..e08a3bb4 100644
--- a/lib/ansible/cli/vault.py
+++ b/lib/ansible/cli/vault.py
@@ -99,6 +99,8 @@ class VaultCLI(CLI):
enc_str_parser.add_argument('-p', '--prompt', dest='encrypt_string_prompt',
action='store_true',
help="Prompt for the string to encrypt")
+ enc_str_parser.add_argument('--show-input', dest='show_string_input', default=False, action='store_true',
+ help='Do not hide input when prompted for the string to encrypt')
enc_str_parser.add_argument('-n', '--name', dest='encrypt_string_names',
action='append',
help="Specify the variable name")
@@ -300,8 +302,13 @@ class VaultCLI(CLI):
# TODO: could prompt for which vault_id to use for each plaintext string
# currently, it will just be the default
- # could use private=True for shadowed input if useful
- prompt_response = display.prompt(msg)
+ hide_input = not context.CLIARGS['show_string_input']
+ if hide_input:
+ msg = "String to encrypt (hidden): "
+ else:
+ msg = "String to encrypt:"
+
+ prompt_response = display.prompt(msg, private=hide_input)
if prompt_response == '':
raise AnsibleOptionsError('The plaintext provided from the prompt was empty, not encrypting')
diff --git a/lib/ansible/collections/list.py b/lib/ansible/collections/list.py
index a1d99017..c6af77a3 100644
--- a/lib/ansible/collections/list.py
+++ b/lib/ansible/collections/list.py
@@ -69,33 +69,34 @@ def list_collection_dirs(search_paths=None, coll_filter=None):
collections = defaultdict(dict)
for path in list_valid_collection_paths(search_paths):
- b_path = to_bytes(path)
- if os.path.isdir(b_path):
- b_coll_root = to_bytes(os.path.join(path, 'ansible_collections'))
+ if os.path.basename(path) != 'ansible_collections':
+ path = os.path.join(path, 'ansible_collections')
+
+ b_coll_root = to_bytes(path, errors='surrogate_or_strict')
- if os.path.exists(b_coll_root) and os.path.isdir(b_coll_root):
+ if os.path.exists(b_coll_root) and os.path.isdir(b_coll_root):
- if namespace is None:
- namespaces = os.listdir(b_coll_root)
- else:
- namespaces = [namespace]
+ if namespace is None:
+ namespaces = os.listdir(b_coll_root)
+ else:
+ namespaces = [namespace]
- for ns in namespaces:
- b_namespace_dir = os.path.join(b_coll_root, to_bytes(ns))
+ for ns in namespaces:
+ b_namespace_dir = os.path.join(b_coll_root, to_bytes(ns))
- if os.path.isdir(b_namespace_dir):
+ if os.path.isdir(b_namespace_dir):
- if collection is None:
- colls = os.listdir(b_namespace_dir)
- else:
- colls = [collection]
+ if collection is None:
+ colls = os.listdir(b_namespace_dir)
+ else:
+ colls = [collection]
- for mycoll in colls:
+ for mycoll in colls:
- # skip dupe collections as they will be masked in execution
- if mycoll not in collections[ns]:
- b_coll = to_bytes(mycoll)
- b_coll_dir = os.path.join(b_namespace_dir, b_coll)
- if is_collection_path(b_coll_dir):
- collections[ns][mycoll] = b_coll_dir
- yield b_coll_dir
+ # skip dupe collections as they will be masked in execution
+ if mycoll not in collections[ns]:
+ b_coll = to_bytes(mycoll)
+ b_coll_dir = os.path.join(b_namespace_dir, b_coll)
+ if is_collection_path(b_coll_dir):
+ collections[ns][mycoll] = b_coll_dir
+ yield b_coll_dir
diff --git a/lib/ansible/compat/selectors/__init__.py b/lib/ansible/compat/selectors/__init__.py
index 6bbf6d8b..a7b260e3 100644
--- a/lib/ansible/compat/selectors/__init__.py
+++ b/lib/ansible/compat/selectors/__init__.py
@@ -19,6 +19,8 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
+# NOT_BUNDLED
+
'''
Compat selectors library. Python-3.5 has this builtin. The selectors2
package exists on pypi to backport the functionality as far as python-2.6.
diff --git a/lib/ansible/config/ansible_builtin_runtime.yml b/lib/ansible/config/ansible_builtin_runtime.yml
index d02a3047..36607a91 100644
--- a/lib/ansible/config/ansible_builtin_runtime.yml
+++ b/lib/ansible/config/ansible_builtin_runtime.yml
@@ -14,7 +14,7 @@ plugin_routing:
chroot:
redirect: community.general.chroot
docker:
- redirect: community.general.docker
+ redirect: community.docker.docker
funcd:
redirect: community.general.funcd
iocage:
@@ -22,7 +22,7 @@ plugin_routing:
jail:
redirect: community.general.jail
kubectl:
- redirect: community.kubernetes.kubectl
+ redirect: kubernetes.core.kubectl
libvirt_lxc:
redirect: community.libvirt.libvirt_lxc
lxc:
@@ -30,7 +30,7 @@ plugin_routing:
lxd:
redirect: community.general.lxd
oc:
- redirect: community.general.oc
+ redirect: community.okd.oc
qubes:
redirect: community.general.qubes
saltstack:
@@ -719,47 +719,47 @@ plugin_routing:
docker_service:
redirect: community.general.docker_service
docker_compose:
- redirect: community.general.docker_compose
+ redirect: community.docker.docker_compose
docker_config:
- redirect: community.general.docker_config
+ redirect: community.docker.docker_config
docker_container:
- redirect: community.general.docker_container
+ redirect: community.docker.docker_container
docker_container_info:
- redirect: community.general.docker_container_info
+ redirect: community.docker.docker_container_info
docker_host_info:
- redirect: community.general.docker_host_info
+ redirect: community.docker.docker_host_info
docker_image:
- redirect: community.general.docker_image
+ redirect: community.docker.docker_image
docker_image_info:
- redirect: community.general.docker_image_info
+ redirect: community.docker.docker_image_info
docker_login:
- redirect: community.general.docker_login
+ redirect: community.docker.docker_login
docker_network:
- redirect: community.general.docker_network
+ redirect: community.docker.docker_network
docker_network_info:
- redirect: community.general.docker_network_info
+ redirect: community.docker.docker_network_info
docker_node:
- redirect: community.general.docker_node
+ redirect: community.docker.docker_node
docker_node_info:
- redirect: community.general.docker_node_info
+ redirect: community.docker.docker_node_info
docker_prune:
- redirect: community.general.docker_prune
+ redirect: community.docker.docker_prune
docker_secret:
- redirect: community.general.docker_secret
+ redirect: community.docker.docker_secret
docker_stack:
- redirect: community.general.docker_stack
+ redirect: community.docker.docker_stack
docker_swarm:
- redirect: community.general.docker_swarm
+ redirect: community.docker.docker_swarm
docker_swarm_info:
- redirect: community.general.docker_swarm_info
+ redirect: community.docker.docker_swarm_info
docker_swarm_service:
- redirect: community.general.docker_swarm_service
+ redirect: community.docker.docker_swarm_service
docker_swarm_service_info:
- redirect: community.general.docker_swarm_service_info
+ redirect: community.docker.docker_swarm_service_info
docker_volume:
- redirect: community.general.docker_volume
+ redirect: community.docker.docker_volume
docker_volume_info:
- redirect: community.general.docker_volume_info
+ redirect: community.docker.docker_volume_info
gcdns_record:
redirect: community.general.gcdns_record
gcdns_zone:
@@ -885,31 +885,31 @@ plugin_routing:
gcspanner:
redirect: community.general.gcspanner
gc_storage:
- redirect: community.general.gc_storage
+ redirect: community.google.gc_storage
gce_eip:
- redirect: community.general.gce_eip
+ redirect: community.google.gce_eip
gce_img:
- redirect: community.general.gce_img
+ redirect: community.google.gce_img
gce_instance_template:
- redirect: community.general.gce_instance_template
+ redirect: community.google.gce_instance_template
gce_labels:
- redirect: community.general.gce_labels
+ redirect: community.google.gce_labels
gce_lb:
- redirect: community.general.gce_lb
+ redirect: community.google.gce_lb
gce_mig:
- redirect: community.general.gce_mig
+ redirect: community.google.gce_mig
gce_net:
- redirect: community.general.gce_net
+ redirect: community.google.gce_net
gce_pd:
- redirect: community.general.gce_pd
+ redirect: community.google.gce_pd
gce_snapshot:
- redirect: community.general.gce_snapshot
+ redirect: community.google.gce_snapshot
gce_tag:
- redirect: community.general.gce_tag
+ redirect: community.google.gce_tag
gcpubsub:
- redirect: community.general.gcpubsub
+ redirect: community.google.gcpubsub
gcpubsub_info:
- redirect: community.general.gcpubsub_info
+ redirect: community.google.gcpubsub_info
heroku_collaborator:
redirect: community.general.heroku_collaborator
hwc_ecs_instance:
@@ -937,17 +937,17 @@ plugin_routing:
hwc_vpc_subnet:
redirect: community.general.hwc_vpc_subnet
kubevirt_cdi_upload:
- redirect: community.general.kubevirt_cdi_upload
+ redirect: community.kubevirt.kubevirt_cdi_upload
kubevirt_preset:
- redirect: community.general.kubevirt_preset
+ redirect: community.kubevirt.kubevirt_preset
kubevirt_pvc:
- redirect: community.general.kubevirt_pvc
+ redirect: community.kubevirt.kubevirt_pvc
kubevirt_rs:
- redirect: community.general.kubevirt_rs
+ redirect: community.kubevirt.kubevirt_rs
kubevirt_template:
- redirect: community.general.kubevirt_template
+ redirect: community.kubevirt.kubevirt_template
kubevirt_vm:
- redirect: community.general.kubevirt_vm
+ redirect: community.kubevirt.kubevirt_vm
linode:
redirect: community.general.linode
linode_v4:
@@ -1403,49 +1403,49 @@ plugin_routing:
mysql_variables:
redirect: community.mysql.mysql_variables
postgresql_copy:
- redirect: community.general.postgresql_copy
+ redirect: community.postgresql.postgresql_copy
postgresql_db:
- redirect: community.general.postgresql_db
+ redirect: community.postgresql.postgresql_db
postgresql_ext:
- redirect: community.general.postgresql_ext
+ redirect: community.postgresql.postgresql_ext
postgresql_idx:
- redirect: community.general.postgresql_idx
+ redirect: community.postgresql.postgresql_idx
postgresql_info:
- redirect: community.general.postgresql_info
+ redirect: community.postgresql.postgresql_info
postgresql_lang:
- redirect: community.general.postgresql_lang
+ redirect: community.postgresql.postgresql_lang
postgresql_membership:
- redirect: community.general.postgresql_membership
+ redirect: community.postgresql.postgresql_membership
postgresql_owner:
- redirect: community.general.postgresql_owner
+ redirect: community.postgresql.postgresql_owner
postgresql_pg_hba:
- redirect: community.general.postgresql_pg_hba
+ redirect: community.postgresql.postgresql_pg_hba
postgresql_ping:
- redirect: community.general.postgresql_ping
+ redirect: community.postgresql.postgresql_ping
postgresql_privs:
- redirect: community.general.postgresql_privs
+ redirect: community.postgresql.postgresql_privs
postgresql_publication:
- redirect: community.general.postgresql_publication
+ redirect: community.postgresql.postgresql_publication
postgresql_query:
- redirect: community.general.postgresql_query
+ redirect: community.postgresql.postgresql_query
postgresql_schema:
- redirect: community.general.postgresql_schema
+ redirect: community.postgresql.postgresql_schema
postgresql_sequence:
- redirect: community.general.postgresql_sequence
+ redirect: community.postgresql.postgresql_sequence
postgresql_set:
- redirect: community.general.postgresql_set
+ redirect: community.postgresql.postgresql_set
postgresql_slot:
- redirect: community.general.postgresql_slot
+ redirect: community.postgresql.postgresql_slot
postgresql_subscription:
- redirect: community.general.postgresql_subscription
+ redirect: community.postgresql.postgresql_subscription
postgresql_table:
- redirect: community.general.postgresql_table
+ redirect: community.postgresql.postgresql_table
postgresql_tablespace:
- redirect: community.general.postgresql_tablespace
+ redirect: community.postgresql.postgresql_tablespace
postgresql_user:
- redirect: community.general.postgresql_user
+ redirect: community.postgresql.postgresql_user
postgresql_user_obj_stat_info:
- redirect: community.general.postgresql_user_obj_stat_info
+ redirect: community.postgresql.postgresql_user_obj_stat_info
proxysql_backend_servers:
redirect: community.proxysql.proxysql_backend_servers
proxysql_global_variables:
@@ -1659,13 +1659,13 @@ plugin_routing:
haproxy:
redirect: community.general.haproxy
hetzner_failover_ip:
- redirect: community.general.hetzner_failover_ip
+ redirect: community.hrobot.failover_ip
hetzner_failover_ip_info:
- redirect: community.general.hetzner_failover_ip_info
+ redirect: community.hrobot.failover_ip_info
hetzner_firewall:
- redirect: community.general.hetzner_firewall
+ redirect: community.hrobot.firewall
hetzner_firewall_info:
- redirect: community.general.hetzner_firewall_info
+ redirect: community.hrobot.firewall_info
infinity:
redirect: community.general.infinity
ip_netns:
@@ -2183,61 +2183,61 @@ plugin_routing:
removal_date: 2019-11-06
warning_text: bigip_gtm_facts has been removed please use bigip_device_info module.
faz_device:
- redirect: community.network.faz_device
+ redirect: community.fortios.faz_device
fmgr_device:
- redirect: community.network.fmgr_device
+ redirect: community.fortios.fmgr_device
fmgr_device_config:
- redirect: community.network.fmgr_device_config
+ redirect: community.fortios.fmgr_device_config
fmgr_device_group:
- redirect: community.network.fmgr_device_group
+ redirect: community.fortios.fmgr_device_group
fmgr_device_provision_template:
- redirect: community.network.fmgr_device_provision_template
+ redirect: community.fortios.fmgr_device_provision_template
fmgr_fwobj_address:
- redirect: community.network.fmgr_fwobj_address
+ redirect: community.fortios.fmgr_fwobj_address
fmgr_fwobj_ippool:
- redirect: community.network.fmgr_fwobj_ippool
+ redirect: community.fortios.fmgr_fwobj_ippool
fmgr_fwobj_ippool6:
- redirect: community.network.fmgr_fwobj_ippool6
+ redirect: community.fortios.fmgr_fwobj_ippool6
fmgr_fwobj_service:
- redirect: community.network.fmgr_fwobj_service
+ redirect: community.fortios.fmgr_fwobj_service
fmgr_fwobj_vip:
- redirect: community.network.fmgr_fwobj_vip
+ redirect: community.fortios.fmgr_fwobj_vip
fmgr_fwpol_ipv4:
- redirect: community.network.fmgr_fwpol_ipv4
+ redirect: community.fortios.fmgr_fwpol_ipv4
fmgr_fwpol_package:
- redirect: community.network.fmgr_fwpol_package
+ redirect: community.fortios.fmgr_fwpol_package
fmgr_ha:
- redirect: community.network.fmgr_ha
+ redirect: community.fortios.fmgr_ha
fmgr_provisioning:
- redirect: community.network.fmgr_provisioning
+ redirect: community.fortios.fmgr_provisioning
fmgr_query:
- redirect: community.network.fmgr_query
+ redirect: community.fortios.fmgr_query
fmgr_script:
- redirect: community.network.fmgr_script
+ redirect: community.fortios.fmgr_script
fmgr_secprof_appctrl:
- redirect: community.network.fmgr_secprof_appctrl
+ redirect: community.fortios.fmgr_secprof_appctrl
fmgr_secprof_av:
- redirect: community.network.fmgr_secprof_av
+ redirect: community.fortios.fmgr_secprof_av
fmgr_secprof_dns:
- redirect: community.network.fmgr_secprof_dns
+ redirect: community.fortios.fmgr_secprof_dns
fmgr_secprof_ips:
- redirect: community.network.fmgr_secprof_ips
+ redirect: community.fortios.fmgr_secprof_ips
fmgr_secprof_profile_group:
- redirect: community.network.fmgr_secprof_profile_group
+ redirect: community.fortios.fmgr_secprof_profile_group
fmgr_secprof_proxy:
- redirect: community.network.fmgr_secprof_proxy
+ redirect: community.fortios.fmgr_secprof_proxy
fmgr_secprof_spam:
- redirect: community.network.fmgr_secprof_spam
+ redirect: community.fortios.fmgr_secprof_spam
fmgr_secprof_ssl_ssh:
- redirect: community.network.fmgr_secprof_ssl_ssh
+ redirect: community.fortios.fmgr_secprof_ssl_ssh
fmgr_secprof_voip:
- redirect: community.network.fmgr_secprof_voip
+ redirect: community.fortios.fmgr_secprof_voip
fmgr_secprof_waf:
- redirect: community.network.fmgr_secprof_waf
+ redirect: community.fortios.fmgr_secprof_waf
fmgr_secprof_wanopt:
- redirect: community.network.fmgr_secprof_wanopt
+ redirect: community.fortios.fmgr_secprof_wanopt
fmgr_secprof_web:
- redirect: community.network.fmgr_secprof_web
+ redirect: community.fortios.fmgr_secprof_web
ftd_configuration:
redirect: community.network.ftd_configuration
ftd_file_download:
@@ -2451,15 +2451,15 @@ plugin_routing:
nos_facts:
redirect: community.network.nos_facts
nso_action:
- redirect: community.network.nso_action
+ redirect: cisco.nso.nso_action
nso_config:
- redirect: community.network.nso_config
+ redirect: cisco.nso.nso_config
nso_query:
- redirect: community.network.nso_query
+ redirect: cisco.nso.nso_query
nso_show:
- redirect: community.network.nso_show
+ redirect: cisco.nso.nso_show
nso_verify:
- redirect: community.network.nso_verify
+ redirect: cisco.nso.nso_verify
nuage_vspk:
redirect: community.network.nuage_vspk
onyx_aaa:
@@ -2593,9 +2593,9 @@ plugin_routing:
vdirect_runnable:
redirect: community.network.vdirect_runnable
routeros_command:
- redirect: community.network.routeros_command
+ redirect: community.routeros.command
routeros_facts:
- redirect: community.network.routeros_facts
+ redirect: community.routeros.facts
slxos_command:
redirect: community.network.slxos_command
slxos_config:
@@ -2783,11 +2783,11 @@ plugin_routing:
cobbler_system:
redirect: community.general.cobbler_system
idrac_firmware:
- redirect: community.general.idrac_firmware
+ redirect: dellemc.openmanage.idrac_firmware
idrac_server_config_profile:
- redirect: community.general.idrac_server_config_profile
+ redirect: dellemc.openmanage.idrac_server_config_profile
ome_device_info:
- redirect: community.general.ome_device_info
+ redirect: dellemc.openmanage.ome_device_info
foreman:
redirect: community.general.foreman
katello:
@@ -3249,23 +3249,23 @@ plugin_routing:
grafana_plugin:
redirect: community.grafana.grafana_plugin
k8s_facts:
- redirect: community.kubernetes.k8s_facts
+ redirect: kubernetes.core.k8s_facts
k8s_raw:
- redirect: community.kubernetes.k8s_raw
+ redirect: kubernetes.core.k8s_raw
k8s:
- redirect: community.kubernetes.k8s
+ redirect: kubernetes.core.k8s
k8s_auth:
- redirect: community.kubernetes.k8s_auth
+ redirect: kubernetes.core.k8s_auth
k8s_info:
- redirect: community.kubernetes.k8s_info
+ redirect: kubernetes.core.k8s_info
k8s_scale:
- redirect: community.kubernetes.k8s_scale
+ redirect: kubernetes.core.k8s_scale
k8s_service:
- redirect: community.kubernetes.k8s_service
+ redirect: kubernetes.core.k8s_service
openshift_raw:
- redirect: community.kubernetes.openshift_raw
+ redirect: kubernetes.core.openshift_raw
openshift_scale:
- redirect: community.kubernetes.openshift_scale
+ redirect: kubernetes.core.openshift_scale
openssh_cert:
redirect: community.crypto.openssh_cert
openssl_pkcs12:
@@ -3326,6 +3326,14 @@ plugin_routing:
redirect: community.mongodb.mongodb_user
mongodb_shard:
redirect: community.mongodb.mongodb_shard
+ vmware_appliance_access_info:
+ redirect: vmware.vmware_rest.vmware_appliance_access_info
+ vmware_appliance_health_info:
+ redirect: vmware.vmware_rest.vmware_appliance_health_info
+ vmware_cis_category_info:
+ redirect: vmware.vmware_rest.vmware_cis_category_info
+ vmware_core_info:
+ redirect: vmware.vmware_rest.vmware_core_info
vcenter_extension_facts:
redirect: community.vmware.vcenter_extension_facts
vmware_about_facts:
@@ -7618,11 +7626,11 @@ plugin_routing:
dimensiondata:
redirect: community.general.dimensiondata
docker:
- redirect: community.general.docker
+ redirect: community.docker.common
docker.common:
- redirect: community.general.docker.common
+ redirect: community.docker.common
docker.swarm:
- redirect: community.general.docker.swarm
+ redirect: community.docker.swarm
ec2:
redirect: amazon.aws.ec2
ecs:
@@ -7637,11 +7645,11 @@ plugin_routing:
firewalld:
redirect: ansible.posix.firewalld
gcdns:
- redirect: community.general.gcdns
+ redirect: community.google.gcdns
gce:
- redirect: community.general.gce
+ redirect: community.google.gce
gcp:
- redirect: community.general.gcp
+ redirect: community.google.gcp
gcp_utils:
redirect: google.cloud.gcp_utils
gitlab:
@@ -7651,7 +7659,7 @@ plugin_routing:
heroku:
redirect: community.general.heroku
hetzner:
- redirect: community.general.hetzner
+ redirect: community.hrobot.robot
hwc_utils:
redirect: community.general.hwc_utils
ibm_sa_utils:
@@ -7671,15 +7679,15 @@ plugin_routing:
ismount:
redirect: ansible.posix.mount
k8s.common:
- redirect: community.kubernetes.common
+ redirect: kubernetes.core.common
k8s.raw:
- redirect: community.kubernetes.raw
+ redirect: kubernetes.core.raw
k8s.scale:
- redirect: community.kubernetes.scale
+ redirect: kubernetes.core.scale
known_hosts:
redirect: community.general.known_hosts
kubevirt:
- redirect: community.general.kubevirt
+ redirect: community.kubevirt.kubevirt
ldap:
redirect: community.general.ldap
linode:
@@ -7956,9 +7964,9 @@ plugin_routing:
network.f5.urls:
redirect: f5networks.f5_modules.urls
network.fortianalyzer.common:
- redirect: community.network.network.fortianalyzer.common
+ redirect: community.fortios.fortianalyzer.common
network.fortianalyzer.fortianalyzer:
- redirect: community.network.network.fortianalyzer.fortianalyzer
+ redirect: community.fortios.fortianalyzer.fortianalyzer
network.fortimanager.common:
redirect: fortinet.fortimanager.common
network.fortimanager.fortimanager:
@@ -8476,9 +8484,9 @@ plugin_routing:
network.nos.nos:
redirect: community.network.network.nos.nos
network.nso:
- redirect: community.network.network.nso
+ redirect: cisco.nso.nso
network.nso.nso:
- redirect: community.network.network.nso.nso
+ redirect: cisco.nso.nso
network.nxos:
redirect: cisco.nxos.network.nxos
network.nxos.argspec:
@@ -8650,9 +8658,9 @@ plugin_routing:
network.restconf.restconf:
redirect: ansible.netcommon.network.restconf.restconf
network.routeros:
- redirect: community.network.network.routeros
+ redirect: community.routeros.routeros
network.routeros.routeros:
- redirect: community.network.network.routeros.routeros
+ redirect: community.routeros.routeros
network.skydive:
redirect: community.skydive.network.skydive
network.skydive.api:
@@ -8774,7 +8782,7 @@ plugin_routing:
podman.common:
redirect: containers.podman.podman.common
postgres:
- redirect: community.general.postgres
+ redirect: community.postgresql.postgres
pure:
redirect: community.general.pure
rabbitmq:
@@ -8786,11 +8794,11 @@ plugin_routing:
redhat:
redirect: community.general.redhat
remote_management.dellemc:
- redirect: community.general.remote_management.dellemc
+ redirect: dellemc.openmanage
remote_management.dellemc.dellemc_idrac:
- redirect: community.general.remote_management.dellemc.dellemc_idrac
+ redirect: dellemc.openmanage.dellemc_idrac
remote_management.dellemc.ome:
- redirect: community.general.remote_management.dellemc.ome
+ redirect: dellemc.openmanage.ome
remote_management.intersight:
redirect: cisco.intersight.intersight
remote_management.lxca:
@@ -8870,7 +8878,7 @@ plugin_routing:
onyx:
redirect: mellanox.onyx.onyx
routeros:
- redirect: community.network.routeros
+ redirect: community.routeros.routeros
slxos:
redirect: community.network.slxos
voss:
@@ -8929,7 +8937,7 @@ plugin_routing:
onyx:
redirect: mellanox.onyx.onyx
routeros:
- redirect: community.network.routeros
+ redirect: community.routeros.routeros
slxos:
redirect: community.network.slxos
sros:
@@ -9212,7 +9220,7 @@ plugin_routing:
dimensiondata_wait:
redirect: community.general.dimensiondata_wait
docker:
- redirect: community.general.docker
+ redirect: community.docker.docker
emc:
redirect: community.general.emc
enos:
@@ -9222,7 +9230,7 @@ plugin_routing:
gcp:
redirect: google.cloud.gcp
hetzner:
- redirect: community.general.hetzner
+ redirect: community.hrobot.robot
hpe3par:
redirect: community.general.hpe3par
hwc:
@@ -9242,9 +9250,9 @@ plugin_routing:
keycloak:
redirect: community.general.keycloak
kubevirt_common_options:
- redirect: community.general.kubevirt_common_options
+ redirect: community.kubevirt.kubevirt_common_options
kubevirt_vm_options:
- redirect: community.general.kubevirt_vm_options
+ redirect: community.kubevirt.kubevirt_vm_options
ldap:
redirect: community.general.ldap
lxca_common:
@@ -9258,7 +9266,7 @@ plugin_routing:
nios:
redirect: community.general.nios
nso:
- redirect: community.network.nso
+ redirect: cisco.nso.nso
oneview:
redirect: community.general.oneview
online:
@@ -9286,7 +9294,7 @@ plugin_routing:
panos:
redirect: community.network.panos
postgres:
- redirect: community.general.postgres
+ redirect: community.postgresql.postgres
proxysql:
redirect: community.proxysql.proxysql
purestorage:
@@ -9310,19 +9318,27 @@ plugin_routing:
zabbix:
redirect: community.zabbix.zabbix
k8s_auth_options:
- redirect: community.kubernetes.k8s_auth_options
+ redirect: kubernetes.core.k8s_auth_options
k8s_name_options:
- redirect: community.kubernetes.k8s_name_options
+ redirect: kubernetes.core.k8s_name_options
k8s_resource_options:
- redirect: community.kubernetes.k8s_resource_options
+ redirect: kubernetes.core.k8s_resource_options
k8s_scale_options:
- redirect: community.kubernetes.k8s_scale_options
+ redirect: kubernetes.core.k8s_scale_options
k8s_state_options:
- redirect: community.kubernetes.k8s_state_options
+ redirect: kubernetes.core.k8s_state_options
acme:
redirect: community.crypto.acme
ecs_credential:
redirect: community.crypto.ecs_credential
+ VmwareRestModule:
+ redirect: vmware.vmware_rest.VmwareRestModule
+ VmwareRestModule_filters:
+ redirect: vmware.vmware_rest.VmwareRestModule_filters
+ VmwareRestModule_full:
+ redirect: vmware.vmware_rest.VmwareRestModule_full
+ VmwareRestModule_state:
+ redirect: vmware.vmware_rest.VmwareRestModule_state
vca:
redirect: community.vmware.vca
vmware:
@@ -9416,7 +9432,7 @@ plugin_routing:
random_mac:
redirect: community.general.random_mac
k8s_config_resource_name:
- redirect: community.kubernetes.k8s_config_resource_name
+ redirect: kubernetes.core.k8s_config_resource_name
cidr_merge:
redirect: ansible.netcommon.cidr_merge
ipaddr:
@@ -9467,7 +9483,7 @@ plugin_routing:
exos:
redirect: community.network.exos
fortianalyzer:
- redirect: community.network.fortianalyzer
+ redirect: community.fortios.fortianalyzer
fortimanager:
redirect: fortinet.fortimanager.fortimanager
ftd:
@@ -9495,13 +9511,13 @@ plugin_routing:
cloudscale:
redirect: cloudscale_ch.cloud.inventory
docker_machine:
- redirect: community.general.docker_machine
+ redirect: community.docker.docker_machine
docker_swarm:
- redirect: community.general.docker_swarm
+ redirect: community.docker.docker_swarm
gitlab_runners:
redirect: community.general.gitlab_runners
kubevirt:
- redirect: community.general.kubevirt
+ redirect: community.kubevirt.kubevirt
linode:
redirect: community.general.linode
nmap:
@@ -9515,9 +9531,9 @@ plugin_routing:
vultr:
redirect: ngine_io.vultr.vultr
k8s:
- redirect: community.kubernetes.k8s
+ redirect: kubernetes.core.k8s
openshift:
- redirect: community.kubernetes.openshift
+ redirect: kubernetes.core.openshift
vmware_vm_inventory:
redirect: community.vmware.vmware_vm_inventory
aws_ec2:
@@ -9567,9 +9583,9 @@ plugin_routing:
flattened:
redirect: community.general.flattened
gcp_storage_file:
- redirect: community.general.gcp_storage_file
+ redirect: community.google.gcp_storage_file
hashi_vault:
- redirect: community.general.hashi_vault
+ redirect: community.hashi_vault.hashi_vault
hiera:
redirect: community.general.hiera
keyring:
@@ -9601,9 +9617,9 @@ plugin_routing:
grafana_dashboard:
redirect: community.grafana.grafana_dashboard
openshift:
- redirect: community.kubernetes.openshift
+ redirect: kubernetes.core.openshift
k8s:
- redirect: community.kubernetes.k8s
+ redirect: kubernetes.core.k8s
mongodb:
redirect: community.mongodb.mongodb
laps_password:
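
The redirect entries above only declare a mapping; the plugin loader follows them at runtime so legacy short names keep resolving to their new collection homes. A minimal, hypothetical Python sketch of that lookup behaviour (the real resolution lives in the plugin loader and also handles deprecations and tombstones, which this ignores):

# Hypothetical sketch: follow plugin_routing redirects until a final FQCN is reached.
# The redirect table is a tiny excerpt of the entries changed above.
redirects = {
    'k8s': 'kubernetes.core.k8s',
    'routeros_command': 'community.routeros.command',
    'idrac_firmware': 'dellemc.openmanage.idrac_firmware',
}

def resolve(name, table):
    seen = set()
    while name in table and name not in seen:
        seen.add(name)
        name = table[name]
    return name

print(resolve('k8s', redirects))  # -> kubernetes.core.k8s
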
diff --git a/lib/ansible/config/base.yml b/lib/ansible/config/base.yml
index 2ad9d72f..9cf3aeed 100644
--- a/lib/ansible/config/base.yml
+++ b/lib/ansible/config/base.yml
@@ -3,20 +3,16 @@
---
ALLOW_WORLD_READABLE_TMPFILES:
name: Allow world-readable temporary files
- deprecated:
- why: moved to a per plugin approach that is more flexible
- version: "2.14"
- alternatives: mostly the same config will work, but now controlled from the plugin itself and not using the general constant.
- default: False
description:
- - This makes the temporary files created on the machine world-readable and will issue a warning instead of failing the task.
- - It is useful when becoming an unprivileged user.
- env: []
- ini:
- - {key: allow_world_readable_tmpfiles, section: defaults}
+ - This setting has been moved to the individual shell plugins as a plugin option :ref:`shell_plugins`.
+ - The existing configuration settings are still accepted with the shell plugin adding additional options, like variables.
+ - This message will be removed in 2.14.
type: boolean
- yaml: {key: defaults.allow_world_readable_tmpfiles}
- version_added: "2.1"
+ default: False
+ deprecated: # (kept for autodetection and removal, deprecation is irrelevant since w/o settings this can never show runtime msg)
+ why: moved to shell plugins
+ version: "2.14"
+ alternatives: 'world_readable_tmp'
ANSIBLE_CONNECTION_PATH:
name: Path of ansible-connection script
default: null
@@ -36,15 +32,29 @@ ANSIBLE_COW_SELECTION:
env: [{name: ANSIBLE_COW_SELECTION}]
ini:
- {key: cow_selection, section: defaults}
-ANSIBLE_COW_WHITELIST:
- name: Cowsay filter whitelist
+ANSIBLE_COW_ACCEPTLIST:
+ name: Cowsay filter acceptance list
default: ['bud-frogs', 'bunny', 'cheese', 'daemon', 'default', 'dragon', 'elephant-in-snake', 'elephant', 'eyes', 'hellokitty', 'kitty', 'luke-koala', 'meow', 'milk', 'moofasa', 'moose', 'ren', 'sheep', 'small', 'stegosaurus', 'stimpy', 'supermilker', 'three-eyes', 'turkey', 'turtle', 'tux', 'udder', 'vader-koala', 'vader', 'www']
description: White list of cowsay templates that are 'safe' to use, set to empty list if you want to enable all installed templates.
- env: [{name: ANSIBLE_COW_WHITELIST}]
+ env:
+ - name: ANSIBLE_COW_WHITELIST
+ deprecated:
+ why: normalizing names to new standard
+ version: "2.15"
+ alternatives: 'ANSIBLE_COW_ACCEPTLIST'
+ - name: ANSIBLE_COW_ACCEPTLIST
+ version_added: '2.11'
ini:
- - {key: cow_whitelist, section: defaults}
+ - key: cow_whitelist
+ section: defaults
+ deprecated:
+ why: normalizing names to new standard
+ version: "2.15"
+ alternatives: 'cowsay_enabled_stencils'
+ - key: cowsay_enabled_stencils
+ section: defaults
+ version_added: '2.11'
type: list
- yaml: {key: display.cowsay_whitelist}
ANSIBLE_FORCE_COLOR:
name: Force color output
default: False
@@ -58,7 +68,11 @@ ANSIBLE_NOCOLOR:
name: Suppress color output
default: False
description: This setting allows suppressing colorizing output, which is used to give a better indication of failure and status information.
- env: [{name: ANSIBLE_NOCOLOR}]
+ env:
+ - name: ANSIBLE_NOCOLOR
+ # this is generic convention for CLI programs
+ - name: NO_COLOR
+ version_added: '2.11'
ini:
- {key: nocolor, section: defaults}
type: boolean
@@ -91,70 +105,15 @@ ANSIBLE_PIPELINING:
- "However this conflicts with privilege escalation (become). For example, when using 'sudo:' operations you must first
disable 'requiretty' in /etc/sudoers on all managed hosts, which is why it is disabled by default."
- This option is disabled if ``ANSIBLE_KEEP_REMOTE_FILES`` is enabled.
+    - This is a global option; each connection plugin can override it, either by having more specific options or by not supporting pipelining at all.
env:
- name: ANSIBLE_PIPELINING
- - name: ANSIBLE_SSH_PIPELINING
ini:
- - section: connection
+ - section: defaults
key: pipelining
- - section: ssh_connection
+ - section: connection
key: pipelining
type: boolean
- yaml: {key: plugins.connection.pipelining}
-ANSIBLE_SSH_ARGS:
- # TODO: move to ssh plugin
- default: -C -o ControlMaster=auto -o ControlPersist=60s
- description:
- - If set, this will override the Ansible default ssh arguments.
- - In particular, users may wish to raise the ControlPersist time to encourage performance. A value of 30 minutes may be appropriate.
- - Be aware that if `-o ControlPath` is set in ssh_args, the control path setting is not used.
- env: [{name: ANSIBLE_SSH_ARGS}]
- ini:
- - {key: ssh_args, section: ssh_connection}
- yaml: {key: ssh_connection.ssh_args}
-ANSIBLE_SSH_CONTROL_PATH:
- # TODO: move to ssh plugin
- default: null
- description:
- - This is the location to save ssh's ControlPath sockets, it uses ssh's variable substitution.
- - Since 2.3, if null, ansible will generate a unique hash. Use `%(directory)s` to indicate where to use the control dir path setting.
- - Before 2.3 it defaulted to `control_path=%(directory)s/ansible-ssh-%%h-%%p-%%r`.
- - Be aware that this setting is ignored if `-o ControlPath` is set in ssh args.
- env: [{name: ANSIBLE_SSH_CONTROL_PATH}]
- ini:
- - {key: control_path, section: ssh_connection}
- yaml: {key: ssh_connection.control_path}
-ANSIBLE_SSH_CONTROL_PATH_DIR:
- # TODO: move to ssh plugin
- default: ~/.ansible/cp
- description:
- - This sets the directory to use for ssh control path if the control path setting is null.
- - Also, provides the `%(directory)s` variable for the control path setting.
- env: [{name: ANSIBLE_SSH_CONTROL_PATH_DIR}]
- ini:
- - {key: control_path_dir, section: ssh_connection}
- yaml: {key: ssh_connection.control_path_dir}
-ANSIBLE_SSH_EXECUTABLE:
- # TODO: move to ssh plugin, note that ssh_utils refs this and needs to be updated if removed
- default: ssh
- description:
- - This defines the location of the ssh binary. It defaults to `ssh` which will use the first ssh binary available in $PATH.
- - This option is usually not required, it might be useful when access to system ssh is restricted,
- or when using ssh wrappers to connect to remote hosts.
- env: [{name: ANSIBLE_SSH_EXECUTABLE}]
- ini:
- - {key: ssh_executable, section: ssh_connection}
- yaml: {key: ssh_connection.ssh_executable}
- version_added: "2.2"
-ANSIBLE_SSH_RETRIES:
- # TODO: move to ssh plugin
- default: 0
- description: Number of attempts to establish a connection before we give up and report the host as 'UNREACHABLE'
- env: [{name: ANSIBLE_SSH_RETRIES}]
- ini:
- - {key: retries, section: ssh_connection}
- type: integer
- yaml: {key: ssh_connection.retries}
ANY_ERRORS_FATAL:
name: Make Task failures fatal
default: False
@@ -220,7 +179,8 @@ CACHE_PLUGIN_TIMEOUT:
type: integer
yaml: {key: facts.cache.timeout}
COLLECTIONS_SCAN_SYS_PATH:
- name: enable/disable scanning sys.path for installed collections
+ name: Scan PYTHONPATH for installed collections
+ description: A boolean to enable or disable scanning the sys.path for installed collections
default: true
type: boolean
env:
@@ -229,7 +189,12 @@ COLLECTIONS_SCAN_SYS_PATH:
- {key: collections_scan_sys_path, section: defaults}
COLLECTIONS_PATHS:
name: ordered list of root paths for loading installed Ansible collections content
- description: Colon separated paths in which Ansible will search for collections content.
+ description: >
+ Colon separated paths in which Ansible will search for collections content.
+ Collections must be in nested *subdirectories*, not directly in these directories.
+ For example, if ``COLLECTIONS_PATHS`` includes ``~/.ansible/collections``,
+ and you want to add ``my.collection`` to that directory, it must be saved as
+ ``~/.ansible/collections/ansible_collections/my/collection``.
default: ~/.ansible/collections:/usr/share/ansible/collections
type: pathspec
env:
@@ -252,15 +217,19 @@ COLLECTIONS_ON_ANSIBLE_VERSION_MISMATCH:
ini: [{key: collections_on_ansible_version_mismatch, section: defaults}]
choices: [error, warning, ignore]
default: warning
+_COLOR_DEFAULTS: &color
+ name: placeholder for color settings' defaults
+ choices: ['black', 'bright gray', 'blue', 'white', 'green', 'bright blue', 'cyan', 'bright green', 'red', 'bright cyan', 'purple', 'bright red', 'yellow', 'bright purple', 'dark gray', 'bright yellow', 'magenta', 'bright magenta', 'normal']
COLOR_CHANGED:
+ <<: *color
name: Color for 'changed' task status
default: yellow
description: Defines the color to use on 'Changed' task status
env: [{name: ANSIBLE_COLOR_CHANGED}]
ini:
- {key: changed, section: colors}
- yaml: {key: display.colors.changed}
COLOR_CONSOLE_PROMPT:
+ <<: *color
name: "Color for ansible-console's prompt task status"
default: white
description: Defines the default color to use for ansible-console
@@ -269,22 +238,23 @@ COLOR_CONSOLE_PROMPT:
- {key: console_prompt, section: colors}
version_added: "2.7"
COLOR_DEBUG:
+ <<: *color
name: Color for debug statements
default: dark gray
description: Defines the color to use when emitting debug messages
env: [{name: ANSIBLE_COLOR_DEBUG}]
ini:
- {key: debug, section: colors}
- yaml: {key: display.colors.debug}
COLOR_DEPRECATE:
+ <<: *color
name: Color for deprecation messages
default: purple
description: Defines the color to use when emitting deprecation messages
env: [{name: ANSIBLE_COLOR_DEPRECATE}]
ini:
- {key: deprecate, section: colors}
- yaml: {key: display.colors.deprecate}
COLOR_DIFF_ADD:
+ <<: *color
name: Color for diff added display
default: green
description: Defines the color to use when showing added lines in diffs
@@ -293,6 +263,7 @@ COLOR_DIFF_ADD:
- {key: diff_add, section: colors}
yaml: {key: display.colors.diff.add}
COLOR_DIFF_LINES:
+ <<: *color
name: Color for diff lines display
default: cyan
description: Defines the color to use when showing diffs
@@ -300,6 +271,7 @@ COLOR_DIFF_LINES:
ini:
- {key: diff_lines, section: colors}
COLOR_DIFF_REMOVE:
+ <<: *color
name: Color for diff removed display
default: red
description: Defines the color to use when showing removed lines in diffs
@@ -307,6 +279,7 @@ COLOR_DIFF_REMOVE:
ini:
- {key: diff_remove, section: colors}
COLOR_ERROR:
+ <<: *color
name: Color for error messages
default: red
description: Defines the color to use when emitting error messages
@@ -315,6 +288,7 @@ COLOR_ERROR:
- {key: error, section: colors}
yaml: {key: colors.error}
COLOR_HIGHLIGHT:
+ <<: *color
name: Color for highlighting
default: white
description: Defines the color to use for highlighting
@@ -322,6 +296,7 @@ COLOR_HIGHLIGHT:
ini:
- {key: highlight, section: colors}
COLOR_OK:
+ <<: *color
name: Color for 'ok' task status
default: green
description: Defines the color to use when showing 'OK' task status
@@ -329,6 +304,7 @@ COLOR_OK:
ini:
- {key: ok, section: colors}
COLOR_SKIP:
+ <<: *color
name: Color for 'skip' task status
default: cyan
description: Defines the color to use when showing 'Skipped' task status
@@ -336,6 +312,7 @@ COLOR_SKIP:
ini:
- {key: skip, section: colors}
COLOR_UNREACHABLE:
+ <<: *color
name: Color for 'unreachable' host state
default: bright red
description: Defines the color to use on 'Unreachable' status
@@ -343,6 +320,7 @@ COLOR_UNREACHABLE:
ini:
- {key: unreachable, section: colors}
COLOR_VERBOSE:
+ <<: *color
name: Color for verbose messages
default: blue
description: Defines the color to use when emitting verbose messages. i.e those that show with '-v's.
@@ -350,6 +328,7 @@ COLOR_VERBOSE:
ini:
- {key: verbose, section: colors}
COLOR_WARN:
+ <<: *color
name: Color for warning messages
default: bright purple
description: Defines the color to use when emitting warning messages
@@ -382,7 +361,7 @@ COVERAGE_REMOTE_OUTPUT:
- {name: _ansible_coverage_remote_output}
type: str
version_added: '2.9'
-COVERAGE_REMOTE_WHITELIST:
+COVERAGE_REMOTE_PATHS:
name: Sets the list of paths to run coverage for.
description:
- A list of paths for files on the Ansible controller to run coverage for when executing on the remote host.
@@ -392,7 +371,7 @@ COVERAGE_REMOTE_WHITELIST:
- This is for internal use only.
default: '*'
env:
- - {name: _ANSIBLE_COVERAGE_REMOTE_WHITELIST}
+ - {name: _ANSIBLE_COVERAGE_REMOTE_PATH_FILTER}
type: str
version_added: '2.9'
ACTION_WARNINGS:
@@ -408,15 +387,19 @@ ACTION_WARNINGS:
version_added: "2.5"
COMMAND_WARNINGS:
name: Command module warnings
- default: True
+ default: False
description:
- - By default Ansible will issue a warning when the shell or command module is used and the command appears to be similar to an existing Ansible module.
+ - Ansible can issue a warning when the shell or command module is used and the command appears to be similar to an existing Ansible module.
- These warnings can be silenced by adjusting this setting to False. You can also control this at the task level with the module option ``warn``.
+ - As of version 2.11, this is disabled by default.
env: [{name: ANSIBLE_COMMAND_WARNINGS}]
ini:
- {key: command_warnings, section: defaults}
type: boolean
version_added: "1.8"
+ deprecated:
+ why: the command warnings feature is being removed
+ version: "2.14"
LOCALHOST_WARNING:
name: Warning when using implicit inventory with only localhost
default: True
@@ -544,14 +527,38 @@ DEFAULT_CACHE_PLUGIN_PATH:
ini:
- {key: cache_plugins, section: defaults}
type: pathspec
-DEFAULT_CALLABLE_WHITELIST:
- name: Template 'callable' whitelist
+CALLABLE_ACCEPT_LIST:
+ name: Template 'callable' accept list
default: []
description: Whitelist of callable methods to be made available to template evaluation
- env: [{name: ANSIBLE_CALLABLE_WHITELIST}]
+ env:
+ - name: ANSIBLE_CALLABLE_WHITELIST
+ deprecated:
+ why: normalizing names to new standard
+ version: "2.15"
+ alternatives: 'ANSIBLE_CALLABLE_ENABLED'
+ - name: ANSIBLE_CALLABLE_ENABLED
+ version_added: '2.11'
ini:
- - {key: callable_whitelist, section: defaults}
+ - key: callable_whitelist
+ section: defaults
+ deprecated:
+ why: normalizing names to new standard
+ version: "2.15"
+ alternatives: 'callable_enabled'
+ - key: callable_enabled
+ section: defaults
+ version_added: '2.11'
type: list
+CONTROLLER_PYTHON_WARNING:
+ name: Running Older than Python 3.8 Warning
+ default: True
+ description: Toggle to control showing warnings related to running a Python version
+ older than Python 3.8 on the controller
+ env: [{name: ANSIBLE_CONTROLLER_PYTHON_WARNING}]
+ ini:
+ - {key: controller_python_warning, section: defaults}
+ type: boolean
DEFAULT_CALLBACK_PLUGIN_PATH:
name: Callback Plugins Path
default: ~/.ansible/plugins/callback:/usr/share/ansible/plugins/callback
@@ -561,17 +568,31 @@ DEFAULT_CALLBACK_PLUGIN_PATH:
- {key: callback_plugins, section: defaults}
type: pathspec
yaml: {key: plugins.callback.path}
-DEFAULT_CALLBACK_WHITELIST:
- name: Callback Whitelist
+CALLBACKS_ENABLED:
+ name: Enable callback plugins that require it.
default: []
description:
- - "List of whitelisted callbacks, not all callbacks need whitelisting,
+ - "List of enabled callbacks, not all callbacks need enabling,
but many of those shipped with Ansible do as we don't want them activated by default."
- env: [{name: ANSIBLE_CALLBACK_WHITELIST}]
+ env:
+ - name: ANSIBLE_CALLBACK_WHITELIST
+ deprecated:
+ why: normalizing names to new standard
+ version: "2.15"
+ alternatives: 'ANSIBLE_CALLBACKS_ENABLED'
+ - name: ANSIBLE_CALLBACKS_ENABLED
+ version_added: '2.11'
ini:
- - {key: callback_whitelist, section: defaults}
+ - key: callback_whitelist
+ section: defaults
+ deprecated:
+ why: normalizing names to new standard
+ version: "2.15"
+ alternatives: 'callback_enabled'
+ - key: callbacks_enabled
+ section: defaults
+ version_added: '2.11'
type: list
- yaml: {key: plugins.callback.whitelist}
DEFAULT_CLICONF_PLUGIN_PATH:
name: Cliconf Plugins Path
default: ~/.ansible/plugins/cliconf:/usr/share/ansible/plugins/cliconf
@@ -707,23 +728,29 @@ DEFAULT_HASH_BEHAVIOUR:
name: Hash merge behaviour
default: replace
type: string
- choices: ["replace", "merge"]
- description:
- - This setting controls how variables merge in Ansible.
- By default Ansible will override variables in specific precedence orders, as described in Variables.
- When a variable of higher precedence wins, it will replace the other value.
- - "Some users prefer that variables that are hashes (aka 'dictionaries' in Python terms) are merged.
- This setting is called 'merge'. This is not the default behavior and it does not affect variables whose values are scalars
- (integers, strings) or arrays. We generally recommend not using this setting unless you think you have an absolute need for it,
- and playbooks in the official examples repos do not use this setting"
- - In version 2.0 a ``combine`` filter was added to allow doing this for a particular variable (described in Filters).
+ choices:
+ replace: Any variable that is defined more than once is overwritten using the order from variable precedence rules (highest wins).
+ merge: Any dictionary variable will be recursively merged with new definitions across the different variable definition sources.
+ description:
+ - This setting controls how duplicate definitions of dictionary variables (aka hash, map, associative array) are handled in Ansible.
+ - This does not affect variables whose values are scalars (integers, strings) or arrays.
+ - "**WARNING**, changing this setting is not recommended as this is fragile and makes your content (plays, roles, collections) non portable,
+ leading to continual confusion and misuse. Don't change this setting unless you think you have an absolute need for it."
+ - We recommend avoiding reusing variable names and relying on the ``combine`` filter and ``vars`` and ``varnames`` lookups
+ to create merged versions of the individual variables. In our experience this is rarely really needed and a sign that too much
+ complexity has been introduced into the data structures and plays.
+ - For some uses you can also look into custom vars_plugins to merge on input, even substituting the default ``host_group_vars``
+ that is in charge of parsing the ``host_vars/`` and ``group_vars/`` directories. Most users of this setting are only interested in inventory scope,
+ but the setting itself affects all sources and makes debugging even harder.
+ - All playbooks and roles in the official examples repos assume the default for this setting.
+ - Changing the setting to ``merge`` applies across variable sources, but many sources will internally still overwrite the variables.
+ For example ``include_vars`` will dedupe variables internally before updating Ansible, with 'last defined' overwriting previous definitions in same file.
+    - The Ansible project recommends you **avoid ``merge`` for new projects.**
+    - It is the intention of the Ansible developers to eventually deprecate and remove this setting, but it is being kept as some users do heavily rely on it.
env: [{name: ANSIBLE_HASH_BEHAVIOUR}]
ini:
- {key: hash_behaviour, section: defaults}
- deprecated:
- why: this feature is fragile and not portable, leading to continual confusion and misuse
- version: "2.13"
- alternatives: the ``combine`` filter explicitly
DEFAULT_HOST_LIST:
name: Inventory Source
default: /etc/ansible/hosts
@@ -1006,16 +1033,6 @@ DEFAULT_ROLES_PATH:
- {key: roles_path, section: defaults}
type: pathspec
yaml: {key: defaults.roles_path}
-DEFAULT_SCP_IF_SSH:
- # TODO: move to ssh plugin
- default: smart
- description:
- - "Preferred method to use when transferring files over ssh."
- - When set to smart, Ansible will try them until one succeeds or they all fail.
- - If set to True, it will force 'scp', if False it will use 'sftp'.
- env: [{name: ANSIBLE_SCP_IF_SSH}]
- ini:
- - {key: scp_if_ssh, section: ssh_connection}
DEFAULT_SELINUX_SPECIAL_FS:
name: Problematic file systems
default: fuse, nfs, vboxsf, ramfs, 9p, vfat
@@ -1029,42 +1046,6 @@ DEFAULT_SELINUX_SPECIAL_FS:
ini:
- {key: special_context_filesystems, section: selinux}
type: list
-DEFAULT_SFTP_BATCH_MODE:
- # TODO: move to ssh plugin
- default: True
- description: 'TODO: write it'
- env: [{name: ANSIBLE_SFTP_BATCH_MODE}]
- ini:
- - {key: sftp_batch_mode, section: ssh_connection}
- type: boolean
- yaml: {key: ssh_connection.sftp_batch_mode}
-DEFAULT_SQUASH_ACTIONS:
- name: Squashable actions
- default: apk, apt, dnf, homebrew, openbsd_pkg, pacman, pip, pkgng, yum, zypper
- description:
- - Ansible can optimise actions that call modules that support list parameters when using ``with_`` looping.
- Instead of calling the module once for each item, the module is called once with the full list.
- - The default value for this setting is only for certain package managers, but it can be used for any module.
- - Currently, this is only supported for modules that have a name or pkg parameter, and only when the item is the only thing being passed to the parameter.
- env: [{name: ANSIBLE_SQUASH_ACTIONS}]
- ini:
- - {key: squash_actions, section: defaults}
- type: list
- version_added: "2.0"
- deprecated:
- why: Loop squashing is deprecated and this configuration will no longer be used
- version: "2.11"
- alternatives: a list directly with the module argument
-DEFAULT_SSH_TRANSFER_METHOD:
- # TODO: move to ssh plugin
- default:
- description: 'unused?'
- # - "Preferred method to use when transferring files over ssh"
- # - Setting to smart will try them until one succeeds or they all fail
- #choices: ['sftp', 'scp', 'dd', 'smart']
- env: [{name: ANSIBLE_SSH_TRANSFER_METHOD}]
- ini:
- - {key: transfer_method, section: ssh_connection}
DEFAULT_STDOUT_CALLBACK:
name: Main display callback plugin
default: default
@@ -1321,7 +1302,7 @@ DISPLAY_SKIPPED_HOSTS:
type: boolean
DOCSITE_ROOT_URL:
name: Root docsite URL
- default: https://docs.ansible.com/ansible/
+ default: https://docs.ansible.com/ansible-core/
description: Root docsite URL used to generate docs URLs in warning/error text;
must be an absolute URL with valid scheme and trailing slash.
ini:
@@ -1376,9 +1357,6 @@ CONNECTION_FACTS_MODULES:
ironware: ansible.legacy.ironware_facts
community.network.ironware: community.network.ironware_facts
description: "Which modules to run during a play's fact gathering stage based on connection"
- env: [{name: ANSIBLE_CONNECTION_FACTS_MODULES}]
- ini:
- - {key: connection_facts_modules, section: defaults}
type: dict
FACTS_MODULES:
name: Gather Facts Modules
@@ -1465,7 +1443,22 @@ GALAXY_DISPLAY_PROGRESS:
- {key: display_progress, section: galaxy}
type: bool
version_added: "2.10"
+GALAXY_CACHE_DIR:
+ default: ~/.ansible/galaxy_cache
+ description:
+ - The directory that stores cached responses from a Galaxy server.
+ - This is only used by the ``ansible-galaxy collection install`` and ``download`` commands.
+ - Cache files inside this dir will be ignored if they are world writable.
+ env:
+ - name: ANSIBLE_GALAXY_CACHE_DIR
+ ini:
+ - section: galaxy
+ key: cache_dir
+ type: path
+ version_added: '2.11'
HOST_KEY_CHECKING:
+ # note: constant not in use by ssh plugin anymore
+ # TODO: check non ssh connection plugins for use/migration
name: Check host keys
default: True
description: 'Set this to "False" if you want to avoid host key checking by the underlying tools Ansible uses to connect to the host'
@@ -1508,9 +1501,11 @@ INTERPRETER_PYTHON_DISTRO_MAP:
'6': /usr/bin/python
'8': /usr/libexec/platform-python
debian:
+ '8': /usr/bin/python
'10': /usr/bin/python3
fedora:
'23': /usr/bin/python3
+ oracle: *rhelish
redhat: *rhelish
rhel: *rhelish
ubuntu:
@@ -1523,6 +1518,8 @@ INTERPRETER_PYTHON_FALLBACK:
name: Ordered list of Python interpreters to check for in discovery
default:
- /usr/bin/python
+ - python3.9
+ - python3.8
- python3.7
- python3.6
- python3.5
@@ -1623,7 +1620,7 @@ INVENTORY_EXPORT:
type: bool
INVENTORY_IGNORE_EXTS:
name: Inventory ignore extensions
- default: "{{(BLACKLIST_EXTS + ('.orig', '.ini', '.cfg', '.retry'))}}"
+ default: "{{(REJECT_EXTS + ('.orig', '.ini', '.cfg', '.retry'))}}"
description: List of extensions to ignore when using a directory as an inventory source
env: [{name: ANSIBLE_INVENTORY_IGNORE}]
ini:
@@ -1685,10 +1682,10 @@ INJECT_FACTS_AS_VARS:
version_added: "2.5"
MODULE_IGNORE_EXTS:
name: Module ignore extensions
- default: "{{(BLACKLIST_EXTS + ('.yaml', '.yml', '.ini'))}}"
+ default: "{{(REJECT_EXTS + ('.yaml', '.yml', '.ini'))}}"
description:
- List of extensions to ignore when looking for modules to load
- - This is for blacklisting script and binary module fallback extensions
+ - This is for rejecting script and binary module fallback extensions
env: [{name: ANSIBLE_MODULE_IGNORE_EXTS}]
ini:
- {key: module_ignore_exts, section: defaults}
@@ -1918,7 +1915,7 @@ USE_PERSISTENT_CONNECTIONS:
- {key: use_persistent_connections, section: defaults}
type: boolean
VARIABLE_PLUGINS_ENABLED:
- name: Vars plugin whitelist
+ name: Vars plugin enabled list
default: ['host_group_vars']
description: Whitelist for variable plugins that require it.
env: [{name: ANSIBLE_VARS_ENABLED}]
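
The COLOR_* entries above now share their ``choices`` list through the ``_COLOR_DEFAULTS: &color`` anchor and ``<<: *color`` merge keys. A small sketch of how that expands when the file is parsed (uses PyYAML, which resolves ``<<`` merge keys; the keys shown are an illustrative subset, not the full definition):

import yaml

doc = '''
_COLOR_DEFAULTS: &color
  choices: ['red', 'green', 'blue']
COLOR_OK:
  <<: *color
  default: green
'''
data = yaml.safe_load(doc)
print(data['COLOR_OK']['choices'])  # inherited from the anchor -> ['red', 'green', 'blue']
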
diff --git a/lib/ansible/config/manager.py b/lib/ansible/config/manager.py
index 99fc49fd..db9f055c 100644
--- a/lib/ansible/config/manager.py
+++ b/lib/ansible/config/manager.py
@@ -24,7 +24,7 @@ except ImportError:
from ansible.config.data import ConfigData
from ansible.errors import AnsibleOptionsError, AnsibleError
from ansible.module_utils._text import to_text, to_bytes, to_native
-from ansible.module_utils.common._collections_compat import Sequence
+from ansible.module_utils.common._collections_compat import Mapping, Sequence
from ansible.module_utils.six import PY3, string_types
from ansible.module_utils.six.moves import configparser
from ansible.module_utils.parsing.convert_bool import boolean
@@ -98,7 +98,7 @@ def ensure_type(value, value_type, origin=None):
elif value_type == 'list':
if isinstance(value, string_types):
- value = [x.strip() for x in value.split(',')]
+ value = [unquote(x.strip()) for x in value.split(',')]
elif not isinstance(value, Sequence):
errmsg = 'list'
@@ -144,8 +144,12 @@ def ensure_type(value, value_type, origin=None):
else:
errmsg = 'pathlist'
+ elif value_type in ('dict', 'dictionary'):
+ if not isinstance(value, Mapping):
+ errmsg = 'dictionary'
+
elif value_type in ('str', 'string'):
- if isinstance(value, (string_types, AnsibleVaultEncryptedUnicode)):
+ if isinstance(value, (string_types, AnsibleVaultEncryptedUnicode, bool, int, float, complex)):
value = unquote(to_text(value, errors='surrogate_or_strict'))
else:
errmsg = 'string'
@@ -383,7 +387,7 @@ class ConfigManager(object):
return ret
- def get_configuration_definitions(self, plugin_type=None, name=None):
+ def get_configuration_definitions(self, plugin_type=None, name=None, ignore_private=False):
''' just list the possible settings, either base or for specific plugins or plugin '''
ret = {}
@@ -394,6 +398,11 @@ class ConfigManager(object):
else:
ret = self._plugins.get(plugin_type, {}).get(name, {})
+ if ignore_private:
+ for cdef in list(ret.keys()):
+ if cdef.startswith('_'):
+ del ret[cdef]
+
return ret
def _loop_entries(self, container, entry_list):
@@ -482,6 +491,12 @@ class ConfigManager(object):
if value is not None:
origin = 'keyword: %s' % keyword
+ if value is None and 'cli' in defs[config]:
+ # avoid circular import .. until valid
+ from ansible import context
+ value, origin = self._loop_entries(context.CLIARGS, defs[config]['cli'])
+ origin = 'cli: %s' % origin
+
# env vars are next precedence
if value is None and defs[config].get('env'):
value, origin = self._loop_entries(py3compat.environ, defs[config]['env'])
@@ -535,6 +550,20 @@ class ConfigManager(object):
raise AnsibleOptionsError('Invalid type for configuration option %s: %s' %
(to_native(_get_entry(plugin_type, plugin_name, config)), to_native(e)))
+ # deal with restricted values
+ if value is not None and 'choices' in defs[config] and defs[config]['choices'] is not None:
+ invalid_choices = True # assume the worst!
+ if defs[config].get('type') == 'list':
+ # for a list type, compare all values in type are allowed
+ invalid_choices = not all(choice in defs[config]['choices'] for choice in value)
+ else:
+ # these should be only the simple data types (string, int, bool, float, etc) .. ignore dicts for now
+ invalid_choices = value not in defs[config]['choices']
+
+ if invalid_choices:
+ raise AnsibleOptionsError('Invalid value "%s" for configuration option "%s", valid values are: %s' %
+ (value, to_native(_get_entry(plugin_type, plugin_name, config)), defs[config]['choices']))
+
# deal with deprecation of the setting
if 'deprecated' in defs[config] and origin != 'default':
self.DEPRECATED.append((config, defs[config].get('deprecated')))
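
For reference, a condensed standalone sketch of the new choices check added above: list-typed settings validate every element, scalar settings are compared directly (illustration only, not the ConfigManager code itself):

def valid_choices(value, choices, is_list_type):
    # list settings: every configured element must be an allowed choice
    if is_list_type:
        return all(item in choices for item in value)
    # scalar settings: the value itself must be an allowed choice
    return value in choices

print(valid_choices(['red', 'blue'], ['red', 'green', 'blue'], True))   # True
print(valid_choices('bright pink', ['red', 'green', 'blue'], False))    # False
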
diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py
index 575bed52..d4cee0c8 100644
--- a/lib/ansible/constants.py
+++ b/lib/ansible/constants.py
@@ -40,32 +40,6 @@ def _deprecated(msg, version='2.8'):
sys.stderr.write(' [DEPRECATED] %s, to be removed in %s\n' % (msg, version))
-def mk_boolean(value):
- ''' moved to module_utils'''
- _deprecated('ansible.constants.mk_boolean() is deprecated. Use ansible.module_utils.parsing.convert_bool.boolean() instead')
- return boolean(value, strict=False)
-
-
-def get_config(parser, section, key, env_var, default_value, value_type=None, expand_relative_paths=False):
- ''' kept for backwarsd compatibility, but deprecated '''
- _deprecated('ansible.constants.get_config() is deprecated. There is new config API, see porting docs.')
-
- value = None
- # small reconstruction of the old code env/ini/default
- value = os.environ.get(env_var, None)
- if value is None:
- try:
- value = get_ini_config_value(parser, {'key': key, 'section': section})
- except Exception:
- pass
- if value is None:
- value = default_value
-
- value = ensure_type(value, value_type)
-
- return value
-
-
def set_constant(name, value, export=vars()):
''' sets constants and returns resolved options dict '''
export[name] = value
@@ -86,17 +60,46 @@ class _DeprecatedSequenceConstant(Sequence):
return self._value[y]
-# Deprecated constants
-BECOME_METHODS = _DeprecatedSequenceConstant(
- ['sudo', 'su', 'pbrun', 'pfexec', 'doas', 'dzdo', 'ksu', 'runas', 'pmrun', 'enable', 'machinectl'],
- ('ansible.constants.BECOME_METHODS is deprecated, please use '
- 'ansible.plugins.loader.become_loader. This list is statically '
- 'defined and may not include all become methods'),
- '2.10'
-)
-
# CONSTANTS ### yes, actual ones
-BLACKLIST_EXTS = ('.pyc', '.pyo', '.swp', '.bak', '~', '.rpm', '.md', '.txt', '.rst')
+
+# The following are hard-coded action names
+_ACTION_DEBUG = add_internal_fqcns(('debug', ))
+_ACTION_IMPORT_PLAYBOOK = add_internal_fqcns(('import_playbook', ))
+_ACTION_IMPORT_ROLE = add_internal_fqcns(('import_role', ))
+_ACTION_IMPORT_TASKS = add_internal_fqcns(('import_tasks', ))
+_ACTION_INCLUDE = add_internal_fqcns(('include', ))
+_ACTION_INCLUDE_ROLE = add_internal_fqcns(('include_role', ))
+_ACTION_INCLUDE_TASKS = add_internal_fqcns(('include_tasks', ))
+_ACTION_INCLUDE_VARS = add_internal_fqcns(('include_vars', ))
+_ACTION_META = add_internal_fqcns(('meta', ))
+_ACTION_SET_FACT = add_internal_fqcns(('set_fact', ))
+_ACTION_SETUP = add_internal_fqcns(('setup', ))
+_ACTION_HAS_CMD = add_internal_fqcns(('command', 'shell', 'script'))
+_ACTION_ALLOWS_RAW_ARGS = _ACTION_HAS_CMD + add_internal_fqcns(('raw', ))
+_ACTION_ALL_INCLUDES = _ACTION_INCLUDE + _ACTION_INCLUDE_TASKS + _ACTION_INCLUDE_ROLE
+_ACTION_ALL_IMPORT_PLAYBOOKS = _ACTION_INCLUDE + _ACTION_IMPORT_PLAYBOOK
+_ACTION_ALL_INCLUDE_IMPORT_TASKS = _ACTION_INCLUDE + _ACTION_INCLUDE_TASKS + _ACTION_IMPORT_TASKS
+_ACTION_ALL_PROPER_INCLUDE_IMPORT_ROLES = _ACTION_INCLUDE_ROLE + _ACTION_IMPORT_ROLE
+_ACTION_ALL_PROPER_INCLUDE_IMPORT_TASKS = _ACTION_INCLUDE_TASKS + _ACTION_IMPORT_TASKS
+_ACTION_ALL_INCLUDE_ROLE_TASKS = _ACTION_INCLUDE_ROLE + _ACTION_INCLUDE_TASKS
+_ACTION_ALL_INCLUDE_TASKS = _ACTION_INCLUDE + _ACTION_INCLUDE_TASKS
+_ACTION_FACT_GATHERING = _ACTION_SETUP + add_internal_fqcns(('gather_facts', ))
+_ACTION_WITH_CLEAN_FACTS = _ACTION_SET_FACT + _ACTION_INCLUDE_VARS
+
+# http://nezzen.net/2008/06/23/colored-text-in-python-using-ansi-escape-sequences/
+COLOR_CODES = {
+ 'black': u'0;30', 'bright gray': u'0;37',
+ 'blue': u'0;34', 'white': u'1;37',
+ 'green': u'0;32', 'bright blue': u'1;34',
+ 'cyan': u'0;36', 'bright green': u'1;32',
+ 'red': u'0;31', 'bright cyan': u'1;36',
+ 'purple': u'0;35', 'bright red': u'1;31',
+ 'yellow': u'0;33', 'bright purple': u'1;35',
+ 'dark gray': u'1;30', 'bright yellow': u'1;33',
+ 'magenta': u'0;35', 'bright magenta': u'1;35',
+ 'normal': u'0',
+}
+REJECT_EXTS = ('.pyc', '.pyo', '.swp', '.bak', '~', '.rpm', '.md', '.txt', '.rst')
BOOL_TRUE = BOOLEANS_TRUE
COLLECTION_PTYPE_COMPAT = {'module': 'modules'}
DEFAULT_BECOME_PASS = None
@@ -200,28 +203,3 @@ for setting in config.data.get_settings():
for warn in config.WARNINGS:
_warning(warn)
-
-
-# The following are hard-coded action names
-_ACTION_DEBUG = add_internal_fqcns(('debug', ))
-_ACTION_IMPORT_PLAYBOOK = add_internal_fqcns(('import_playbook', ))
-_ACTION_IMPORT_ROLE = add_internal_fqcns(('import_role', ))
-_ACTION_IMPORT_TASKS = add_internal_fqcns(('import_tasks', ))
-_ACTION_INCLUDE = add_internal_fqcns(('include', ))
-_ACTION_INCLUDE_ROLE = add_internal_fqcns(('include_role', ))
-_ACTION_INCLUDE_TASKS = add_internal_fqcns(('include_tasks', ))
-_ACTION_INCLUDE_VARS = add_internal_fqcns(('include_vars', ))
-_ACTION_META = add_internal_fqcns(('meta', ))
-_ACTION_SET_FACT = add_internal_fqcns(('set_fact', ))
-_ACTION_SETUP = add_internal_fqcns(('setup', ))
-_ACTION_HAS_CMD = add_internal_fqcns(('command', 'shell', 'script'))
-_ACTION_ALLOWS_RAW_ARGS = _ACTION_HAS_CMD + add_internal_fqcns(('raw', ))
-_ACTION_ALL_INCLUDES = _ACTION_INCLUDE + _ACTION_INCLUDE_TASKS + _ACTION_INCLUDE_ROLE
-_ACTION_ALL_IMPORT_PLAYBOOKS = _ACTION_INCLUDE + _ACTION_IMPORT_PLAYBOOK
-_ACTION_ALL_INCLUDE_IMPORT_TASKS = _ACTION_INCLUDE + _ACTION_INCLUDE_TASKS + _ACTION_IMPORT_TASKS
-_ACTION_ALL_PROPER_INCLUDE_IMPORT_ROLES = _ACTION_INCLUDE_ROLE + _ACTION_IMPORT_ROLE
-_ACTION_ALL_PROPER_INCLUDE_IMPORT_TASKS = _ACTION_INCLUDE_TASKS + _ACTION_IMPORT_TASKS
-_ACTION_ALL_INCLUDE_ROLE_TASKS = _ACTION_INCLUDE_ROLE + _ACTION_INCLUDE_TASKS
-_ACTION_ALL_INCLUDE_TASKS = _ACTION_INCLUDE + _ACTION_INCLUDE_TASKS
-_ACTION_FACT_GATHERING = _ACTION_SETUP + add_internal_fqcns(('gather_facts', ))
-_ACTION_WITH_CLEAN_FACTS = _ACTION_SET_FACT + _ACTION_INCLUDE_VARS
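
The _ACTION_* tuples moved above are built with add_internal_fqcns, which (as used here) expands each short action name with its ansible.builtin and ansible.legacy fully qualified forms. A hedged approximation of that helper, for illustration only:

def add_internal_fqcns(names):
    # assumption: short names gain the two internal collection prefixes;
    # names already containing a dot would be left as-is by the real helper
    expanded = []
    for name in names:
        expanded.append(name)
        if '.' not in name:
            expanded.append('ansible.builtin.%s' % name)
            expanded.append('ansible.legacy.%s' % name)
    return tuple(expanded)

print(add_internal_fqcns(('set_fact',)))
# ('set_fact', 'ansible.builtin.set_fact', 'ansible.legacy.set_fact')
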
diff --git a/lib/ansible/errors/__init__.py b/lib/ansible/errors/__init__.py
index b942197e..f1179ab1 100644
--- a/lib/ansible/errors/__init__.py
+++ b/lib/ansible/errors/__init__.py
@@ -20,6 +20,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
+import traceback
from ansible.errors.yaml_strings import (
YAML_COMMON_DICT_ERROR,
@@ -53,22 +54,33 @@ class AnsibleError(Exception):
def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=False, orig_exc=None):
super(AnsibleError, self).__init__(message)
+ self._show_content = show_content
+ self._suppress_extended_error = suppress_extended_error
+ self._message = to_native(message)
+ self.obj = obj
+ self.orig_exc = orig_exc
+
+ @property
+ def message(self):
# we import this here to prevent an import loop problem,
# since the objects code also imports ansible.errors
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject
- self._obj = obj
- self._show_content = show_content
- if obj and isinstance(obj, AnsibleBaseYAMLObject):
+ message = [self._message]
+ if isinstance(self.obj, AnsibleBaseYAMLObject):
extended_error = self._get_extended_error()
- if extended_error and not suppress_extended_error:
- self.message = '%s\n\n%s' % (to_native(message), to_native(extended_error))
- else:
- self.message = '%s' % to_native(message)
- else:
- self.message = '%s' % to_native(message)
- if orig_exc:
- self.orig_exc = orig_exc
+ if extended_error and not self._suppress_extended_error:
+ message.append(
+ '\n\n%s' % to_native(extended_error)
+ )
+ elif self.orig_exc:
+ message.append('. %s' % to_native(self.orig_exc))
+
+ return ''.join(message)
+
+ @message.setter
+ def message(self, val):
+ self._message = val
def __str__(self):
return self.message
@@ -124,7 +136,7 @@ class AnsibleError(Exception):
error_message = ''
try:
- (src_file, line_number, col_number) = self._obj.ansible_pos
+ (src_file, line_number, col_number) = self.obj.ansible_pos
error_message += YAML_POSITION_DETAILS % (src_file, line_number, col_number)
if src_file not in ('<string>', '<unicode>') and self._show_content:
(target_line, prev_line) = self._get_error_lines_from_file(src_file, line_number - 1)
@@ -320,7 +332,7 @@ class AnsibleActionFail(AnsibleAction):
def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=False, orig_exc=None, result=None):
super(AnsibleActionFail, self).__init__(message=message, obj=obj, show_content=show_content,
suppress_extended_error=suppress_extended_error, orig_exc=orig_exc, result=result)
- self.result.update({'failed': True, 'msg': message})
+ self.result.update({'failed': True, 'msg': message, 'exception': traceback.format_exc()})
class _AnsibleActionDone(AnsibleAction):
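
The AnsibleError change above moves message assembly into a property, so context attached after construction (obj, orig_exc) is still reflected when the error is finally rendered. A simplified standalone sketch of that pattern, ignoring the YAML-object handling:

class LazyMessageError(Exception):
    def __init__(self, message='', orig_exc=None):
        super().__init__(message)
        self._message = message
        self.orig_exc = orig_exc

    @property
    def message(self):
        # assemble the final text only when it is read
        parts = [self._message]
        if self.orig_exc:
            parts.append('. %s' % self.orig_exc)
        return ''.join(parts)

    def __str__(self):
        return self.message

err = LazyMessageError('templating failed', orig_exc=ValueError('bad value'))
print(err)  # templating failed. bad value
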
diff --git a/lib/ansible/executor/module_common.py b/lib/ansible/executor/module_common.py
index 15f2506a..ade9179c 100644
--- a/lib/ansible/executor/module_common.py
+++ b/lib/ansible/executor/module_common.py
@@ -29,11 +29,12 @@ import shlex
import zipfile
import re
import pkgutil
+from ast import AST, Import, ImportFrom
from io import BytesIO
from ansible.release import __version__, __author__
from ansible import constants as C
-from ansible.errors import AnsibleError, AnsiblePluginRemovedError
+from ansible.errors import AnsibleError
from ansible.executor.interpreter_discovery import InterpreterDiscoveryRequiredError
from ansible.executor.powershell import module_manifest as ps_manifest
from ansible.module_utils.common.json import AnsibleJSONEncoder
@@ -65,7 +66,7 @@ except NameError:
display = Display()
-ModuleUtilsProcessEntry = namedtuple('ModuleUtilsInfo', ['name_parts', 'is_ambiguous', 'has_redirected_child'])
+ModuleUtilsProcessEntry = namedtuple('ModuleUtilsInfo', ['name_parts', 'is_ambiguous', 'has_redirected_child', 'is_optional'])
REPLACER = b"#<<INCLUDE_ANSIBLE_MODULE_COMMON>>"
REPLACER_VERSION = b"\"<<ANSIBLE_VERSION>>\""
@@ -193,7 +194,8 @@ def _ansiballz_main():
basic._ANSIBLE_ARGS = json_params
%(coverage)s
# Run the module! By importing it as '__main__', it thinks it is executing as a script
- runpy.run_module(mod_name='%(module_fqn)s', init_globals=None, run_name='__main__', alter_sys=True)
+ runpy.run_module(mod_name='%(module_fqn)s', init_globals=dict(_module_fqn='%(module_fqn)s', _modlib_path=modlib_path),
+ run_name='__main__', alter_sys=True)
# Ansible modules must exit themselves
print('{"msg": "New-style module did not handle its own exit", "failed": true}')
@@ -236,10 +238,6 @@ def _ansiballz_main():
basedir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'debug_dir')
args_path = os.path.join(basedir, 'args')
- if command == 'excommunicate':
- print('The excommunicate debug command is deprecated and will be removed in 2.11. Use execute instead.')
- command = 'execute'
-
if command == 'explode':
# transform the ZIPDATA into an exploded directory of code and then
# print the path to the code. This is an easy way for people to look
@@ -315,6 +313,7 @@ def _ansiballz_main():
temp_path = tempfile.mkdtemp(prefix='ansible_%(ansible_module)s_payload_')
zipped_mod = os.path.join(temp_path, 'ansible_%(ansible_module)s_payload.zip')
+
with open(zipped_mod, 'wb') as modlib:
modlib.write(base64.b64decode(ZIPDATA))
@@ -442,7 +441,7 @@ NEW_STYLE_PYTHON_MODULE_RE = re.compile(
class ModuleDepFinder(ast.NodeVisitor):
- def __init__(self, module_fqn, is_pkg_init=False, *args, **kwargs):
+ def __init__(self, module_fqn, tree, is_pkg_init=False, *args, **kwargs):
"""
Walk the ast tree for the python module.
:arg module_fqn: The fully qualified name to reach this module in dotted notation.
@@ -466,10 +465,37 @@ class ModuleDepFinder(ast.NodeVisitor):
.. seealso:: :python3:class:`ast.NodeVisitor`
"""
super(ModuleDepFinder, self).__init__(*args, **kwargs)
+ self._tree = tree # squirrel this away so we can compare node parents to it
self.submodules = set()
+ self.optional_imports = set()
self.module_fqn = module_fqn
self.is_pkg_init = is_pkg_init
+ self._visit_map = {
+ Import: self.visit_Import,
+ ImportFrom: self.visit_ImportFrom,
+ }
+
+ self.visit(tree)
+
+ def generic_visit(self, node):
+ """Overridden ``generic_visit`` that makes some assumptions about our
+ use case, and improves performance by calling visitors directly instead
+ of calling ``visit`` to offload calling visitors.
+ """
+ generic_visit = self.generic_visit
+ visit_map = self._visit_map
+ for field, value in ast.iter_fields(node):
+ if isinstance(value, list):
+ for item in value:
+ if isinstance(item, (Import, ImportFrom)):
+ item.parent = node
+ visit_map[item.__class__](item)
+ elif isinstance(item, AST):
+ generic_visit(item)
+
+ visit = generic_visit
+
def visit_Import(self, node):
"""
Handle import ansible.module_utils.MODLIB[.MODLIBn] [as asname]
@@ -482,6 +508,9 @@ class ModuleDepFinder(ast.NodeVisitor):
alias.name.startswith('ansible_collections.')):
py_mod = tuple(alias.name.split('.'))
self.submodules.add(py_mod)
+ # if the import's parent is the root document, it's a required import, otherwise it's optional
+ if node.parent != self._tree:
+ self.optional_imports.add(py_mod)
self.generic_visit(node)
def visit_ImportFrom(self, node):
@@ -543,6 +572,9 @@ class ModuleDepFinder(ast.NodeVisitor):
if py_mod:
for alias in node.names:
self.submodules.add(py_mod + (alias.name,))
+ # if the import's parent is the root document, it's a required import, otherwise it's optional
+ if node.parent != self._tree:
+ self.optional_imports.add(py_mod + (alias.name,))
self.generic_visit(node)
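
The new optional-import tracking above hinges on one idea: an import whose parent node is not the module root (for example, one wrapped in try/except ImportError) is treated as optional. A small standalone sketch of that check using the ast module:

import ast

src = '''
import ansible.module_utils.basic
try:
    import ansible.module_utils.six
except ImportError:
    pass
'''
tree = ast.parse(src)

# record each node's parent so imports can be compared against the module root
for node in ast.walk(tree):
    for child in ast.iter_child_nodes(node):
        child.parent = node

optional = set()
for node in ast.walk(tree):
    if isinstance(node, ast.Import) and node.parent is not tree:
        optional.update(alias.name for alias in node.names)

print(optional)  # {'ansible.module_utils.six'}
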
@@ -555,7 +587,7 @@ def _slurp(path):
return data
-def _get_shebang(interpreter, task_vars, templar, args=tuple()):
+def _get_shebang(interpreter, task_vars, templar, args=tuple(), remote_is_local=False):
"""
Note not stellar API:
Returns None instead of always returning a shebang line. Doing it this
@@ -563,55 +595,71 @@ def _get_shebang(interpreter, task_vars, templar, args=tuple()):
file rather than trust that we reformatted what they already have
correctly.
"""
- interpreter_name = os.path.basename(interpreter).strip()
-
# FUTURE: add logical equivalence for python3 in the case of py3-only modules
- # check for first-class interpreter config
- interpreter_config_key = "INTERPRETER_%s" % interpreter_name.upper()
+ interpreter_name = os.path.basename(interpreter).strip()
- if C.config.get_configuration_definitions().get(interpreter_config_key):
- # a config def exists for this interpreter type; consult config for the value
- interpreter_out = C.config.get_config_value(interpreter_config_key, variables=task_vars)
- discovered_interpreter_config = u'discovered_interpreter_%s' % interpreter_name
+ # name for interpreter var
+ interpreter_config = u'ansible_%s_interpreter' % interpreter_name
+ # key for config
+ interpreter_config_key = "INTERPRETER_%s" % interpreter_name.upper()
- interpreter_out = templar.template(interpreter_out.strip())
+ interpreter_out = None
- facts_from_task_vars = task_vars.get('ansible_facts', {})
+ # looking for python, rest rely on matching vars
+ if interpreter_name == 'python':
+ # skip detection for network os execution, use playbook supplied one if possible
+ if remote_is_local:
+ interpreter_out = task_vars['ansible_playbook_python']
- # handle interpreter discovery if requested
- if interpreter_out in ['auto', 'auto_legacy', 'auto_silent', 'auto_legacy_silent']:
- if discovered_interpreter_config not in facts_from_task_vars:
- # interpreter discovery is desired, but has not been run for this host
- raise InterpreterDiscoveryRequiredError("interpreter discovery needed",
- interpreter_name=interpreter_name,
- discovery_mode=interpreter_out)
- else:
- interpreter_out = facts_from_task_vars[discovered_interpreter_config]
- else:
- # a config def does not exist for this interpreter type; consult vars for a possible direct override
- interpreter_config = u'ansible_%s_interpreter' % interpreter_name
+ # a config def exists for this interpreter type; consult config for the value
+ elif C.config.get_configuration_definition(interpreter_config_key):
- if interpreter_config not in task_vars:
- return None, interpreter
+ interpreter_from_config = C.config.get_config_value(interpreter_config_key, variables=task_vars)
+ interpreter_out = templar.template(interpreter_from_config.strip())
- interpreter_out = templar.template(task_vars[interpreter_config].strip())
+ # handle interpreter discovery if requested or empty interpreter was provided
+ if not interpreter_out or interpreter_out in ['auto', 'auto_legacy', 'auto_silent', 'auto_legacy_silent']:
- shebang = u'#!' + interpreter_out
+ discovered_interpreter_config = u'discovered_interpreter_%s' % interpreter_name
+ facts_from_task_vars = task_vars.get('ansible_facts', {})
- if args:
- shebang = shebang + u' ' + u' '.join(args)
+ if discovered_interpreter_config not in facts_from_task_vars:
+ # interpreter discovery is desired, but has not been run for this host
+ raise InterpreterDiscoveryRequiredError("interpreter discovery needed", interpreter_name=interpreter_name, discovery_mode=interpreter_out)
+ else:
+ interpreter_out = facts_from_task_vars[discovered_interpreter_config]
+ else:
+ raise InterpreterDiscoveryRequiredError("interpreter discovery required", interpreter_name=interpreter_name, discovery_mode='auto_legacy')
+
+ elif interpreter_config in task_vars:
+ # for non python we consult vars for a possible direct override
+ interpreter_out = templar.template(task_vars.get(interpreter_config).strip())
+
+ if not interpreter_out:
+        # nothing matched (None) or in case someone configures an empty string or empty interpreter
+ interpreter_out = interpreter
+ shebang = None
+ elif interpreter_out == interpreter:
+ # no change, no new shebang
+ shebang = None
+ else:
+ # set shebang cause we changed interpreter
+ shebang = u'#!' + interpreter_out
+ if args:
+ shebang = shebang + u' ' + u' '.join(args)
return shebang, interpreter_out
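
A condensed sketch of the decision the rewritten _get_shebang ends with: if nothing overrides the interpreter, or the override equals the current one, no shebang rewrite is emitted (hypothetical helper name; the real function also handles discovery and templating):

def pick_shebang(configured_interpreter, current_interpreter, args=()):
    interpreter = configured_interpreter or current_interpreter
    if interpreter == current_interpreter:
        return None, interpreter          # nothing to rewrite
    shebang = u'#!' + interpreter
    if args:
        shebang += u' ' + u' '.join(args)
    return shebang, interpreter

print(pick_shebang(None, '/usr/bin/python'))                # (None, '/usr/bin/python')
print(pick_shebang('/usr/bin/python3', '/usr/bin/python'))  # ('#!/usr/bin/python3', '/usr/bin/python3')
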
class ModuleUtilLocatorBase:
- def __init__(self, fq_name_parts, is_ambiguous=False, child_is_redirected=False):
+ def __init__(self, fq_name_parts, is_ambiguous=False, child_is_redirected=False, is_optional=False):
self._is_ambiguous = is_ambiguous
# a child package redirection could cause intermediate package levels to be missing, eg
# from ansible.module_utils.x.y.z import foo; if x.y.z.foo is redirected, we may not have packages on disk for
# the intermediate packages x.y.z, so we'll need to supply empty packages for those
self._child_is_redirected = child_is_redirected
+ self._is_optional = is_optional
self.found = False
self.redirected = False
self.fq_name_parts = fq_name_parts
@@ -640,6 +688,8 @@ class ModuleUtilLocatorBase:
try:
collection_metadata = _get_collection_metadata(self._collection_name)
except ValueError as ve: # collection not found or some other error related to collection load
+ if self._is_optional:
+ return False
raise AnsibleError('error processing module_util {0} loading redirected collection {1}: {2}'
.format('.'.join(name_parts), self._collection_name, to_native(ve)))
@@ -798,8 +848,8 @@ class LegacyModuleUtilLocator(ModuleUtilLocatorBase):
class CollectionModuleUtilLocator(ModuleUtilLocatorBase):
- def __init__(self, fq_name_parts, is_ambiguous=False, child_is_redirected=False):
- super(CollectionModuleUtilLocator, self).__init__(fq_name_parts, is_ambiguous, child_is_redirected)
+ def __init__(self, fq_name_parts, is_ambiguous=False, child_is_redirected=False, is_optional=False):
+ super(CollectionModuleUtilLocator, self).__init__(fq_name_parts, is_ambiguous, child_is_redirected, is_optional)
if fq_name_parts[0] != 'ansible_collections':
raise Exception('CollectionModuleUtilLocator can only locate from ansible_collections, got {0}'.format(fq_name_parts))
@@ -891,20 +941,19 @@ def recursive_finder(name, module_fqn, module_data, zf):
except (SyntaxError, IndentationError) as e:
raise AnsibleError("Unable to import %s due to %s" % (name, e.msg))
- finder = ModuleDepFinder(module_fqn)
- finder.visit(tree)
+ finder = ModuleDepFinder(module_fqn, tree)
# the format of this set is a tuple of the module name and whether or not the import is ambiguous as a module name
# or an attribute of a module (eg from x.y import z <-- is z a module or an attribute of x.y?)
- modules_to_process = [ModuleUtilsProcessEntry(m, True, False) for m in finder.submodules]
+ modules_to_process = [ModuleUtilsProcessEntry(m, True, False, is_optional=m in finder.optional_imports) for m in finder.submodules]
# HACK: basic is currently always required since module global init is currently tied up with AnsiballZ arg input
- modules_to_process.append(ModuleUtilsProcessEntry(('ansible', 'module_utils', 'basic'), False, False))
+ modules_to_process.append(ModuleUtilsProcessEntry(('ansible', 'module_utils', 'basic'), False, False, is_optional=False))
# we'll be adding new modules inline as we discover them, so just keep going til we've processed them all
while modules_to_process:
modules_to_process.sort() # not strictly necessary, but nice to process things in predictable and repeatable order
- py_module_name, is_ambiguous, child_is_redirected = modules_to_process.pop(0)
+ py_module_name, is_ambiguous, child_is_redirected, is_optional = modules_to_process.pop(0)
if py_module_name in py_module_cache:
# this is normal; we'll often see the same module imported many times, but we only need to process it once
@@ -914,7 +963,8 @@ def recursive_finder(name, module_fqn, module_data, zf):
module_info = LegacyModuleUtilLocator(py_module_name, is_ambiguous=is_ambiguous,
mu_paths=module_utils_paths, child_is_redirected=child_is_redirected)
elif py_module_name[0] == 'ansible_collections':
- module_info = CollectionModuleUtilLocator(py_module_name, is_ambiguous=is_ambiguous, child_is_redirected=child_is_redirected)
+ module_info = CollectionModuleUtilLocator(py_module_name, is_ambiguous=is_ambiguous,
+ child_is_redirected=child_is_redirected, is_optional=is_optional)
else:
# FIXME: dot-joined result
display.warning('ModuleDepFinder improperly found a non-module_utils import %s'
@@ -923,6 +973,9 @@ def recursive_finder(name, module_fqn, module_data, zf):
# Could not find the module. Construct a helpful error message.
if not module_info.found:
+ if is_optional:
+ # this was a best-effort optional import that we couldn't find, oh well, move along...
+ continue
# FIXME: use dot-joined candidate names
msg = 'Could not find imported module support code for {0}. Looked for ({1})'.format(module_fqn, module_info.candidate_names_joined)
raise AnsibleError(msg)
@@ -938,9 +991,9 @@ def recursive_finder(name, module_fqn, module_data, zf):
except (SyntaxError, IndentationError) as e:
raise AnsibleError("Unable to import %s due to %s" % (module_info.fq_name_parts, e.msg))
- finder = ModuleDepFinder('.'.join(module_info.fq_name_parts), module_info.is_package)
- finder.visit(tree)
- modules_to_process.extend(ModuleUtilsProcessEntry(m, True, False) for m in finder.submodules if m not in py_module_cache)
+ finder = ModuleDepFinder('.'.join(module_info.fq_name_parts), tree, module_info.is_package)
+ modules_to_process.extend(ModuleUtilsProcessEntry(m, True, False, is_optional=m in finder.optional_imports)
+ for m in finder.submodules if m not in py_module_cache)
# we've processed this item, add it to the output list
py_module_cache[module_info.fq_name_parts] = (module_info.source_code, module_info.output_path)
@@ -951,7 +1004,7 @@ def recursive_finder(name, module_fqn, module_data, zf):
accumulated_pkg_name.append(pkg) # we're accumulating this across iterations
normalized_name = tuple(accumulated_pkg_name) # extra machinations to get a hashable type (list is not)
if normalized_name not in py_module_cache:
- modules_to_process.append((normalized_name, False, module_info.redirected))
+ modules_to_process.append(ModuleUtilsProcessEntry(normalized_name, False, module_info.redirected, is_optional=is_optional))
for py_module_name in py_module_cache:
py_module_file_name = py_module_cache[py_module_name][1]
@@ -1029,7 +1082,7 @@ def _add_module_to_zip(zf, remote_module_fqn, b_module_data):
def _find_module_utils(module_name, b_module_data, module_path, module_args, task_vars, templar, module_compression, async_timeout, become,
- become_method, become_user, become_password, become_flags, environment):
+ become_method, become_user, become_password, become_flags, environment, remote_is_local=False):
"""
Given the source of the module, convert it to a Jinja2 template to insert
module code and return whether it's a new or old style module.
@@ -1077,7 +1130,6 @@ def _find_module_utils(module_name, b_module_data, module_path, module_args, tas
return b_module_data, module_style, shebang
output = BytesIO()
- py_module_names = set()
try:
remote_module_fqn = _get_ansible_module_fqn(module_path)
@@ -1148,9 +1200,15 @@ def _find_module_utils(module_name, b_module_data, module_path, module_args, tas
# so that no one looking for the file reads a partially
# written file)
if not os.path.exists(lookup_path):
- # Note -- if we have a global function to setup, that would
- # be a better place to run this
- os.makedirs(lookup_path)
+ try:
+ # Note -- if we have a global function to setup, that would
+ # be a better place to run this
+ os.makedirs(lookup_path)
+ except OSError:
+ # Multiple processes tried to create the directory. If it still does not
+ # exist, raise the original exception.
+ if not os.path.exists(lookup_path):
+ raise
display.debug('ANSIBALLZ: Writing module')
with open(cached_module_filename + '-part', 'wb') as f:
f.write(zipdata)
@@ -1175,7 +1233,7 @@ def _find_module_utils(module_name, b_module_data, module_path, module_args, tas
'Look at traceback for that process for debugging information.')
zipdata = to_text(zipdata, errors='surrogate_or_strict')
- shebang, interpreter = _get_shebang(u'/usr/bin/python', task_vars, templar)
+ shebang, interpreter = _get_shebang(u'/usr/bin/python', task_vars, templar, remote_is_local=remote_is_local)
if shebang is None:
shebang = u'#!/usr/bin/python'
@@ -1267,7 +1325,7 @@ def _find_module_utils(module_name, b_module_data, module_path, module_args, tas
def modify_module(module_name, module_path, module_args, templar, task_vars=None, module_compression='ZIP_STORED', async_timeout=0, become=False,
- become_method=None, become_user=None, become_password=None, become_flags=None, environment=None):
+ become_method=None, become_user=None, become_password=None, become_flags=None, environment=None, remote_is_local=False):
"""
Used to insert chunks of code into modules before transfer rather than
doing regular python imports. This allows for more efficient transfer in
@@ -1299,7 +1357,7 @@ def modify_module(module_name, module_path, module_args, templar, task_vars=None
(b_module_data, module_style, shebang) = _find_module_utils(module_name, b_module_data, module_path, module_args, task_vars, templar, module_compression,
async_timeout=async_timeout, become=become, become_method=become_method,
become_user=become_user, become_password=become_password, become_flags=become_flags,
- environment=environment)
+ environment=environment, remote_is_local=remote_is_local)
if module_style == 'binary':
return (b_module_data, module_style, to_text(shebang, nonstring='passthru'))
@@ -1313,7 +1371,7 @@ def modify_module(module_name, module_path, module_args, templar, task_vars=None
# _get_shebang() takes text strings
args = [to_text(a, errors='surrogate_or_strict') for a in args]
interpreter = args[0]
- b_new_shebang = to_bytes(_get_shebang(interpreter, task_vars, templar, args[1:])[0],
+ b_new_shebang = to_bytes(_get_shebang(interpreter, task_vars, templar, args[1:], remote_is_local=remote_is_local)[0],
errors='surrogate_or_strict', nonstring='passthru')
if b_new_shebang:
@@ -1380,9 +1438,14 @@ def get_action_args_with_defaults(action, args, defaults, templar, redirected_na
tmp_args.update((module_defaults.get('group/%s' % group_name) or {}).copy())
# handle specific action defaults
- for action in redirected_names:
- if action in module_defaults:
- tmp_args.update(module_defaults[action].copy())
+ for redirected_action in redirected_names:
+ legacy = None
+ if redirected_action.startswith('ansible.legacy.') and action == redirected_action:
+ legacy = redirected_action.split('ansible.legacy.')[-1]
+ if legacy and legacy in module_defaults:
+ tmp_args.update(module_defaults[legacy].copy())
+ if redirected_action in module_defaults:
+ tmp_args.update(module_defaults[redirected_action].copy())
# direct args override all
tmp_args.update(args)
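For illustration, the defaults-merge order implemented above (including the new ansible.legacy handling) can be restated as the standalone sketch below; merge_module_defaults is a hypothetical name, not code from this patch.

    def merge_module_defaults(action, args, redirected_names, module_defaults):
        tmp_args = {}
        for redirected_action in redirected_names:
            legacy = None
            # an ansible.legacy.* action also honours defaults keyed on the short name
            if redirected_action.startswith('ansible.legacy.') and action == redirected_action:
                legacy = redirected_action.split('ansible.legacy.')[-1]
            if legacy and legacy in module_defaults:
                tmp_args.update(module_defaults[legacy].copy())
            if redirected_action in module_defaults:
                tmp_args.update(module_defaults[redirected_action].copy())
        # direct task args always win
        tmp_args.update(args)
        return tmp_args

    # merge_module_defaults('ansible.legacy.yum', {'state': 'absent'}, ['ansible.legacy.yum'],
    #                       {'yum': {'state': 'present', 'enablerepo': 'extras'}})
    # -> {'enablerepo': 'extras', 'state': 'absent'}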
diff --git a/lib/ansible/executor/play_iterator.py b/lib/ansible/executor/play_iterator.py
index 1a53f3e7..9927bd5b 100644
--- a/lib/ansible/executor/play_iterator.py
+++ b/lib/ansible/executor/play_iterator.py
@@ -244,7 +244,7 @@ class PlayIterator:
display.debug("host %s is done iterating, returning" % host.name)
return (s, None)
- (s, task) = self._get_next_task_from_state(s, host=host, peek=peek)
+ (s, task) = self._get_next_task_from_state(s, host=host)
if not peek:
self._host_states[host.name] = s
@@ -254,7 +254,7 @@ class PlayIterator:
display.debug(" ^ state is: %s" % s)
return (s, task)
- def _get_next_task_from_state(self, state, host, peek, in_child=False):
+ def _get_next_task_from_state(self, state, host):
task = None
@@ -318,7 +318,7 @@ class PlayIterator:
# have one recurse into it for the next task. If we're done with the child
# state, we clear it and drop back to getting the next task from the list.
if state.tasks_child_state:
- (state.tasks_child_state, task) = self._get_next_task_from_state(state.tasks_child_state, host=host, peek=peek, in_child=True)
+ (state.tasks_child_state, task) = self._get_next_task_from_state(state.tasks_child_state, host=host)
if self._check_failed_state(state.tasks_child_state):
# failed child state, so clear it and move into the rescue portion
state.tasks_child_state = None
@@ -359,7 +359,7 @@ class PlayIterator:
self._play._removed_hosts.remove(host.name)
if state.rescue_child_state:
- (state.rescue_child_state, task) = self._get_next_task_from_state(state.rescue_child_state, host=host, peek=peek, in_child=True)
+ (state.rescue_child_state, task) = self._get_next_task_from_state(state.rescue_child_state, host=host)
if self._check_failed_state(state.rescue_child_state):
state.rescue_child_state = None
self._set_failed_state(state)
@@ -389,7 +389,7 @@ class PlayIterator:
# run state to ITERATING_COMPLETE in the event of any errors, or when we
# have hit the end of the list of blocks.
if state.always_child_state:
- (state.always_child_state, task) = self._get_next_task_from_state(state.always_child_state, host=host, peek=peek, in_child=True)
+ (state.always_child_state, task) = self._get_next_task_from_state(state.always_child_state, host=host)
if self._check_failed_state(state.always_child_state):
state.always_child_state = None
self._set_failed_state(state)
@@ -411,11 +411,6 @@ class PlayIterator:
state.rescue_child_state = None
state.always_child_state = None
state.did_rescue = False
-
- # we're advancing blocks, so if this was an end-of-role block we
- # mark the current role complete
- if block._eor and host.name in block._role._had_task_run and not in_child and not peek:
- block._role._completed[host.name] = True
else:
task = block.always[state.cur_always_task]
if isinstance(task, Block):
diff --git a/lib/ansible/executor/playbook_executor.py b/lib/ansible/executor/playbook_executor.py
index aacf1353..bfaecba9 100644
--- a/lib/ansible/executor/playbook_executor.py
+++ b/lib/ansible/executor/playbook_executor.py
@@ -30,10 +30,13 @@ from ansible.plugins.loader import become_loader, connection_loader, shell_loade
from ansible.playbook import Playbook
from ansible.template import Templar
from ansible.utils.helpers import pct_to_int
+from ansible.utils.collection_loader import AnsibleCollectionConfig
+from ansible.utils.collection_loader._collection_finder import _get_collection_name_from_path, _get_collection_playbook_path
from ansible.utils.path import makedirs_safe
from ansible.utils.ssh_functions import set_default_transport
from ansible.utils.display import Display
+
display = Display()
@@ -87,7 +90,24 @@ class PlaybookExecutor:
list(shell_loader.all(class_only=True))
list(become_loader.all(class_only=True))
- for playbook_path in self._playbooks:
+ for playbook in self._playbooks:
+
+ # deal with FQCN
+ resource = _get_collection_playbook_path(playbook)
+ if resource is not None:
+ playbook_path = resource[1]
+ playbook_collection = resource[2]
+ else:
+ playbook_path = playbook
+ # not fqcn, but might still be a collection playbook
+ playbook_collection = _get_collection_name_from_path(playbook)
+
+ if playbook_collection:
+ display.warning("running playbook inside collection {0}".format(playbook_collection))
+ AnsibleCollectionConfig.default_collection = playbook_collection
+ else:
+ AnsibleCollectionConfig.default_collection = None
+
pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader)
# FIXME: move out of inventory self._inventory.set_playbook_basedir(os.path.realpath(os.path.dirname(playbook_path)))
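For illustration, the playbook resolution order added above is roughly the following; resolve_playbook and its two injected lookups are hypothetical stand-ins for _get_collection_playbook_path and _get_collection_name_from_path, and the assumed tuple layout (path at index 1, collection at index 2) is taken from the code above.

    def resolve_playbook(playbook, lookup_fqcn, collection_from_path):
        resource = lookup_fqcn(playbook)
        if resource is not None:
            # FQCN hit: the resource carries the on-disk path and the owning collection
            playbook_path, playbook_collection = resource[1], resource[2]
        else:
            # plain path, but it may still live inside an installed collection
            playbook_path = playbook
            playbook_collection = collection_from_path(playbook)
        return playbook_path, playbook_collection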
diff --git a/lib/ansible/executor/powershell/coverage_wrapper.ps1 b/lib/ansible/executor/powershell/coverage_wrapper.ps1
index 5044ab92..39bfa05c 100644
--- a/lib/ansible/executor/powershell/coverage_wrapper.ps1
+++ b/lib/ansible/executor/powershell/coverage_wrapper.ps1
@@ -54,7 +54,7 @@ Function New-CoverageBreakpoint {
$info
}
-Function Compare-WhitelistPattern {
+Function Compare-PathFilterPattern {
Param (
[String[]]$Patterns,
[String]$Path
@@ -85,7 +85,7 @@ $file_encoding = 'UTF8'
try {
$scripts = [System.Collections.Generic.List`1[System.Object]]@($script:common_functions)
- $coverage_whitelist = $Payload.coverage.whitelist.Split(":", [StringSplitOptions]::RemoveEmptyEntries)
+ $coverage_path_filter = $Payload.coverage.path_filter.Split(":", [StringSplitOptions]::RemoveEmptyEntries)
# We need to track what utils have already been added to the script for loading. This is because the load
# order is important and can have module_utils that rely on other utils.
@@ -104,7 +104,7 @@ try {
Set-Content -LiteralPath $util_path -Value $util_code -Encoding $file_encoding
$ansible_path = $Payload.coverage.module_util_paths.$util_name
- if ((Compare-WhitelistPattern -Patterns $coverage_whitelist -Path $ansible_path)) {
+ if ((Compare-PathFilterPattern -Patterns $coverage_path_filter -Path $ansible_path)) {
$cov_params = @{
Path = $util_path
Code = $util_sb
@@ -133,7 +133,7 @@ try {
$scripts.Add($module_path)
$ansible_path = $Payload.coverage.module_path
- if ((Compare-WhitelistPattern -Patterns $coverage_whitelist -Path $ansible_path)) {
+ if ((Compare-PathFilterPattern -Patterns $coverage_path_filter -Path $ansible_path)) {
$cov_params = @{
Path = $module_path
Code = [ScriptBlock]::Create($module)
diff --git a/lib/ansible/executor/powershell/module_manifest.py b/lib/ansible/executor/powershell/module_manifest.py
index 83a1c3a7..a784d244 100644
--- a/lib/ansible/executor/powershell/module_manifest.py
+++ b/lib/ansible/executor/powershell/module_manifest.py
@@ -340,8 +340,8 @@ def _create_powershell_wrapper(b_module_data, module_path, module_args,
finder.scan_exec_script('coverage_wrapper')
coverage_manifest['output'] = coverage_output
- coverage_whitelist = C.config.get_config_value('COVERAGE_REMOTE_WHITELIST', variables=task_vars)
- coverage_manifest['whitelist'] = coverage_whitelist
+ coverage_enabled = C.config.get_config_value('COVERAGE_REMOTE_PATHS', variables=task_vars)
+ coverage_manifest['path_filter'] = coverage_enabled
# make sure Ansible.ModuleUtils.AddType is added if any C# utils are used
if len(finder.cs_utils_wrapper) > 0 or len(finder.cs_utils_module) > 0:
diff --git a/lib/ansible/executor/process/worker.py b/lib/ansible/executor/process/worker.py
index 0b18fc35..510928c0 100644
--- a/lib/ansible/executor/process/worker.py
+++ b/lib/ansible/executor/process/worker.py
@@ -134,6 +134,18 @@ class WorkerProcess(multiprocessing_context.Process):
return self._run()
except BaseException as e:
self._hard_exit(e)
+ finally:
+ # This is a hack, pure and simple, to work around a potential deadlock
+ # in ``multiprocessing.Process`` when flushing stdout/stderr during process
+ # shutdown. We have various ``Display`` calls that may fire from a fork
+ # so we cannot do this early. Instead, this happens at the very end
+ # to avoid that deadlock, by simply side stepping it. This should not be
+ # treated as a long term fix. Additionally this behavior only presents itself
+ # on Python3. Python2 does not exhibit the deadlock behavior.
+ # TODO: Evaluate overhauling ``Display`` to not write directly to stdout
+ # and evaluate migrating away from the ``fork`` multiprocessing start method.
+ if sys.version_info[0] >= 3:
+ sys.stdout = sys.stderr = open(os.devnull, 'w')
def _run(self):
'''
@@ -166,41 +178,38 @@ class WorkerProcess(multiprocessing_context.Process):
display.debug("done running TaskExecutor() for %s/%s [%s]" % (self._host, self._task, self._task._uuid))
self._host.vars = dict()
self._host.groups = []
- task_result = TaskResult(
+
+ # put the result on the result queue
+ display.debug("sending task result for task %s" % self._task._uuid)
+ self._final_q.send_task_result(
self._host.name,
self._task._uuid,
executor_result,
task_fields=self._task.dump_attrs(),
)
-
- # put the result on the result queue
- display.debug("sending task result for task %s" % self._task._uuid)
- self._final_q.put(task_result)
display.debug("done sending task result for task %s" % self._task._uuid)
except AnsibleConnectionFailure:
self._host.vars = dict()
self._host.groups = []
- task_result = TaskResult(
+ self._final_q.send_task_result(
self._host.name,
self._task._uuid,
dict(unreachable=True),
task_fields=self._task.dump_attrs(),
)
- self._final_q.put(task_result, block=False)
except Exception as e:
if not isinstance(e, (IOError, EOFError, KeyboardInterrupt, SystemExit)) or isinstance(e, TemplateNotFound):
try:
self._host.vars = dict()
self._host.groups = []
- task_result = TaskResult(
+ self._final_q.send_task_result(
self._host.name,
self._task._uuid,
dict(failed=True, exception=to_text(traceback.format_exc()), stdout=''),
task_fields=self._task.dump_attrs(),
)
- self._final_q.put(task_result, block=False)
except Exception:
display.debug(u"WORKER EXCEPTION: %s" % to_text(e))
display.debug(u"WORKER TRACEBACK: %s" % to_text(traceback.format_exc()))
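For illustration, the shutdown workaround added to WorkerProcess.run() boils down to the pattern below (a minimal sketch, not code from this patch): redirect stdout/stderr to os.devnull as the very last step so the Python 3 flush-on-exit deadlock described in the comment cannot trigger.

    import os
    import sys

    def run_worker(do_work):
        try:
            return do_work()
        finally:
            # only needed on Python 3; Python 2 does not exhibit the deadlock
            if sys.version_info[0] >= 3:
                sys.stdout = sys.stderr = open(os.devnull, 'w')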
diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py
index ec1fc976..ff48a32d 100644
--- a/lib/ansible/executor/task_executor.py
+++ b/lib/ansible/executor/task_executor.py
@@ -5,7 +5,6 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
-import re
import pty
import time
import json
@@ -20,7 +19,7 @@ from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVar
from ansible.executor.task_result import TaskResult
from ansible.executor.module_common import get_action_args_with_defaults
from ansible.module_utils.parsing.convert_bool import boolean
-from ansible.module_utils.six import iteritems, string_types, binary_type
+from ansible.module_utils.six import iteritems, binary_type
from ansible.module_utils.six.moves import xrange
from ansible.module_utils._text import to_text, to_native
from ansible.module_utils.connection import write_to_file_descriptor
@@ -83,10 +82,6 @@ class TaskExecutor:
class.
'''
- # Modules that we optimize by squashing loop items into a single call to
- # the module
- SQUASH_ACTIONS = frozenset(C.DEFAULT_SQUASH_ACTIONS)
-
def __init__(self, host, task, job_vars, play_context, new_stdin, loader, shared_loader_obj, final_q):
self._host = host
self._task = task
@@ -126,10 +121,13 @@ class TaskExecutor:
# create the overall result item
res = dict(results=item_results)
- # loop through the item results, and set the global changed/failed result flags based on any item.
+ # loop through the item results and set the global changed/failed/skipped result flags based on any item.
+ res['skipped'] = True
for item in item_results:
if 'changed' in item and item['changed'] and not res.get('changed'):
res['changed'] = True
+ if res['skipped'] and ('skipped' not in item or ('skipped' in item and not item['skipped'])):
+ res['skipped'] = False
if 'failed' in item and item['failed']:
item_ignore = item.pop('_ansible_ignore_errors')
if not res.get('failed'):
@@ -149,8 +147,10 @@ class TaskExecutor:
res[array] = res[array] + item[array]
del item[array]
- if not res.get('Failed', False):
+ if not res.get('failed', False):
res['msg'] = 'All items completed'
+ if res['skipped']:
+ res['msg'] = 'All items skipped'
else:
res = dict(changed=False, skipped=True, skipped_reason='No items in the list', results=[])
else:
@@ -213,7 +213,7 @@ class TaskExecutor:
if self._loader.get_basedir() not in self._job_vars['ansible_search_path']:
self._job_vars['ansible_search_path'].append(self._loader.get_basedir())
- templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=self._job_vars)
+ templar = Templar(loader=self._loader, variables=self._job_vars)
items = None
loop_cache = self._job_vars.get('_ansible_loop_cache')
if loop_cache is not None:
@@ -276,7 +276,7 @@ class TaskExecutor:
label = None
loop_pause = 0
extended = False
- templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=self._job_vars)
+ templar = Templar(loader=self._loader, variables=self._job_vars)
# FIXME: move this to the object itself to allow post_validate to take care of templating (loop_control.post_validate)
if self._task.loop_control:
@@ -298,9 +298,6 @@ class TaskExecutor:
u" to something else to avoid variable collisions and unexpected behavior." % loop_var)
ran_once = False
- if self._task.loop_with:
- # Only squash with 'with_:' not with the 'loop:', 'magic' squashing can be removed once with_ loops are
- items = self._squash_items(items, loop_var, task_vars)
no_log = False
items_len = len(items)
@@ -384,15 +381,21 @@ class TaskExecutor:
'msg': 'Failed to template loop_control.label: %s' % to_text(e)
})
- self._final_q.put(
- TaskResult(
- self._host.name,
- self._task._uuid,
- res,
- task_fields=task_fields,
- ),
- block=False,
+ tr = TaskResult(
+ self._host.name,
+ self._task._uuid,
+ res,
+ task_fields=task_fields,
)
+ if tr.is_failed() or tr.is_unreachable():
+ self._final_q.send_callback('v2_runner_item_on_failed', tr)
+ elif tr.is_skipped():
+ self._final_q.send_callback('v2_runner_item_on_skipped', tr)
+ else:
+ if getattr(self._task, 'diff', False):
+ self._final_q.send_callback('v2_on_file_diff', tr)
+ self._final_q.send_callback('v2_runner_item_on_ok', tr)
+
results.append(res)
del task_vars[loop_var]
@@ -414,90 +417,6 @@ class TaskExecutor:
return results
- def _squash_items(self, items, loop_var, variables):
- '''
- Squash items down to a comma-separated list for certain modules which support it
- (typically package management modules).
- '''
- name = None
- try:
- # _task.action could contain templatable strings (via action: and
- # local_action:) Template it before comparing. If we don't end up
- # optimizing it here, the templatable string might use template vars
- # that aren't available until later (it could even use vars from the
- # with_items loop) so don't make the templated string permanent yet.
- templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables)
- task_action = self._task.action
- if templar.is_template(task_action):
- task_action = templar.template(task_action, fail_on_undefined=False)
-
- if len(items) > 0 and task_action in self.SQUASH_ACTIONS:
- if all(isinstance(o, string_types) for o in items):
- final_items = []
-
- found = None
- for allowed in ['name', 'pkg', 'package']:
- name = self._task.args.pop(allowed, None)
- if name is not None:
- found = allowed
- break
-
- # This gets the information to check whether the name field
- # contains a template that we can squash for
- template_no_item = template_with_item = None
- if name:
- if templar.is_template(name):
- variables[loop_var] = '\0$'
- template_no_item = templar.template(name, variables, cache=False)
- variables[loop_var] = '\0@'
- template_with_item = templar.template(name, variables, cache=False)
- del variables[loop_var]
-
- # Check if the user is doing some operation that doesn't take
- # name/pkg or the name/pkg field doesn't have any variables
- # and thus the items can't be squashed
- if template_no_item != template_with_item:
- if self._task.loop_with and self._task.loop_with not in ('items', 'list'):
- value_text = "\"{{ query('%s', %r) }}\"" % (self._task.loop_with, self._task.loop)
- else:
- value_text = '%r' % self._task.loop
- # Without knowing the data structure well, it's easiest to strip python2 unicode
- # literals after stringifying
- value_text = re.sub(r"\bu'", "'", value_text)
-
- display.deprecated(
- 'Invoking "%s" only once while using a loop via squash_actions is deprecated. '
- 'Instead of using a loop to supply multiple items and specifying `%s: "%s"`, '
- 'please use `%s: %s` and remove the loop' % (self._task.action, found, name, found, value_text),
- version='2.11', collection_name='ansible.builtin'
- )
- for item in items:
- variables[loop_var] = item
- if self._task.evaluate_conditional(templar, variables):
- new_item = templar.template(name, cache=False)
- final_items.append(new_item)
- self._task.args['name'] = final_items
- # Wrap this in a list so that the calling function loop
- # executes exactly once
- return [final_items]
- else:
- # Restore the name parameter
- self._task.args['name'] = name
- # elif:
- # Right now we only optimize single entries. In the future we
- # could optimize more types:
- # * lists can be squashed together
- # * dicts could squash entries that match in all cases except the
- # name or pkg field.
- except Exception:
- # Squashing is an optimization. If it fails for any reason,
- # simply use the unoptimized list of items.
-
- # Restore the name parameter
- if name is not None:
- self._task.args['name'] = name
- return items
-
def _execute(self, variables=None):
'''
The primary workhorse of the executor system, this runs the task
@@ -508,7 +427,7 @@ class TaskExecutor:
if variables is None:
variables = self._job_vars
- templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables)
+ templar = Templar(loader=self._loader, variables=variables)
context_validation_error = None
try:
@@ -560,8 +479,8 @@ class TaskExecutor:
if self._loop_eval_error is not None:
raise self._loop_eval_error # pylint: disable=raising-bad-type
- # if we ran into an error while setting up the PlayContext, raise it now
- if context_validation_error is not None:
+ # if we ran into an error while setting up the PlayContext, raise it now, unless it is a known issue with delegation
+ if context_validation_error is not None and not (self._task.delegate_to and isinstance(context_validation_error, AnsibleUndefinedVariable)):
raise context_validation_error # pylint: disable=raising-bad-type
# if this task is a TaskInclude, we just return now with a success code so the
@@ -691,7 +610,6 @@ class TaskExecutor:
if self._task.async_val > 0:
if self._task.poll > 0 and not result.get('skipped') and not result.get('failed'):
result = self._poll_async_result(result=result, templar=templar, task_vars=vars_copy)
- # FIXME callback 'v2_runner_on_async_poll' here
# ensure no log is preserved
result["_ansible_no_log"] = self._play_context.no_log
@@ -713,7 +631,7 @@ class TaskExecutor:
failed_when_result = False
return failed_when_result
- if 'ansible_facts' in result:
+ if 'ansible_facts' in result and self._task.action not in C._ACTION_DEBUG:
if self._task.action in C._ACTION_WITH_CLEAN_FACTS:
vars_copy.update(result['ansible_facts'])
else:
@@ -763,7 +681,15 @@ class TaskExecutor:
result['_ansible_retry'] = True
result['retries'] = retries
display.debug('Retrying task, attempt %d of %d' % (attempt, retries))
- self._final_q.put(TaskResult(self._host.name, self._task._uuid, result, task_fields=self._task.dump_attrs()), block=False)
+ self._final_q.send_callback(
+ 'v2_runner_retry',
+ TaskResult(
+ self._host.name,
+ self._task._uuid,
+ result,
+ task_fields=self._task.dump_attrs()
+ )
+ )
time.sleep(delay)
self._handler = self._get_action_handler(connection=self._connection, templar=templar)
else:
@@ -777,7 +703,7 @@ class TaskExecutor:
if self._task.register:
variables[self._task.register] = result = wrap_var(result)
- if 'ansible_facts' in result:
+ if 'ansible_facts' in result and self._task.action not in C._ACTION_DEBUG:
if self._task.action in C._ACTION_WITH_CLEAN_FACTS:
variables.update(result['ansible_facts'])
else:
@@ -801,6 +727,10 @@ class TaskExecutor:
for k in plugin_vars:
result["_ansible_delegated_vars"][k] = cvars.get(k)
+ for requireshed in ('ansible_host', 'ansible_port', 'ansible_user', 'ansible_connection'):
+ if requireshed not in result["_ansible_delegated_vars"] and requireshed in cvars:
+ result["_ansible_delegated_vars"][requireshed] = cvars.get(requireshed)
+
# and return
display.debug("attempt loop complete, returning result")
return result
@@ -869,6 +799,15 @@ class TaskExecutor:
raise
else:
time_left -= self._task.poll
+ self._final_q.send_callback(
+ 'v2_runner_on_async_poll',
+ TaskResult(
+ self._host.name,
+ async_task, # We send the full task here, because the controller knows nothing about it, the TE created it
+ async_result,
+ task_fields=self._task.dump_attrs(),
+ ),
+ )
if int(async_result.get('finished', 0)) != 1:
if async_result.get('_ansible_parsed'):
@@ -876,6 +815,28 @@ class TaskExecutor:
else:
return dict(failed=True, msg="async task produced unparseable results", async_result=async_result)
else:
+ # If the async task finished, automatically cleanup the temporary
+ # status file left behind.
+ cleanup_task = Task().load(
+ {
+ 'async_status': {
+ 'jid': async_jid,
+ 'mode': 'cleanup',
+ },
+ 'environment': self._task.environment,
+ }
+ )
+ cleanup_handler = self._shared_loader_obj.action_loader.get(
+ 'ansible.legacy.async_status',
+ task=cleanup_task,
+ connection=self._connection,
+ play_context=self._play_context,
+ loader=self._loader,
+ templar=templar,
+ shared_loader_obj=self._shared_loader_obj,
+ )
+ cleanup_handler.run(task_vars=task_vars)
+ cleanup_handler.cleanup(force=True)
async_handler.cleanup(force=True)
return async_result
@@ -1025,6 +986,9 @@ class TaskExecutor:
# config system instead of directly accessing play_context.
task_keys['password'] = self._play_context.password
+ # Prevent task retries from overriding connection retries
+ del(task_keys['retries'])
+
# set options with 'templated vars' specific to this plugin and dependent ones
self._connection.set_options(task_keys=task_keys, var_options=options)
varnames.extend(self._set_plugin_options('shell', variables, templar, task_keys))
@@ -1071,6 +1035,9 @@ class TaskExecutor:
handler_name = self._task.action
elif all((module_prefix in C.NETWORK_GROUP_MODULES, self._shared_loader_obj.action_loader.has_plugin(network_action, collection_list=collections))):
handler_name = network_action
+ display.vvvv("Using network group action {handler} for {action}".format(handler=handler_name,
+ action=self._task.action),
+ host=self._play_context.remote_addr)
else:
# use ansible.legacy.normal to allow (historic) local action_plugins/ override without collections search
handler_name = 'ansible.legacy.normal'
@@ -1098,7 +1065,7 @@ def start_connection(play_context, variables, task_uuid):
Starts the persistent connection
'''
candidate_paths = [C.ANSIBLE_CONNECTION_PATH or os.path.dirname(sys.argv[0])]
- candidate_paths.extend(os.environ['PATH'].split(os.pathsep))
+ candidate_paths.extend(os.environ.get('PATH', '').split(os.pathsep))
for dirname in candidate_paths:
ansible_connection = os.path.join(dirname, 'ansible-connection')
if os.path.isfile(ansible_connection):
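For illustration, the per-item callback routing that the loop handling above now performs in the worker looks roughly like this; dispatch_item is a hypothetical helper and send_callback stands in for FinalQueue.send_callback.

    def dispatch_item(result, send_callback, task_wants_diff=False):
        # route each loop item result to the matching v2 callback event
        if result.is_failed() or result.is_unreachable():
            send_callback('v2_runner_item_on_failed', result)
        elif result.is_skipped():
            send_callback('v2_runner_item_on_skipped', result)
        else:
            if task_wants_diff:
                send_callback('v2_on_file_diff', result)
            send_callback('v2_runner_item_on_ok', result)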
diff --git a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py
index f43bdc78..5647e4e6 100644
--- a/lib/ansible/executor/task_queue_manager.py
+++ b/lib/ansible/executor/task_queue_manager.py
@@ -22,7 +22,9 @@ __metaclass__ = type
import os
import sys
import tempfile
+import threading
import time
+import multiprocessing.queues
from ansible import constants as C
from ansible import context
@@ -30,18 +32,17 @@ from ansible.errors import AnsibleError
from ansible.executor.play_iterator import PlayIterator
from ansible.executor.stats import AggregateStats
from ansible.executor.task_result import TaskResult
-from ansible.module_utils.six import string_types
+from ansible.module_utils.six import PY3, string_types
from ansible.module_utils._text import to_text, to_native
-from ansible.playbook.block import Block
from ansible.playbook.play_context import PlayContext
+from ansible.playbook.task import Task
from ansible.plugins.loader import callback_loader, strategy_loader, module_loader
from ansible.plugins.callback import CallbackBase
from ansible.template import Templar
-from ansible.utils.collection_loader import AnsibleCollectionRef
-from ansible.utils.helpers import pct_to_int
from ansible.vars.hostvars import HostVars
from ansible.vars.reserved import warn_if_reserved
from ansible.utils.display import Display
+from ansible.utils.lock import lock_decorator
from ansible.utils.multiprocessing import context as multiprocessing_context
@@ -50,6 +51,36 @@ __all__ = ['TaskQueueManager']
display = Display()
+class CallbackSend:
+ def __init__(self, method_name, *args, **kwargs):
+ self.method_name = method_name
+ self.args = args
+ self.kwargs = kwargs
+
+
+class FinalQueue(multiprocessing.queues.Queue):
+ def __init__(self, *args, **kwargs):
+ if PY3:
+ kwargs['ctx'] = multiprocessing_context
+ super(FinalQueue, self).__init__(*args, **kwargs)
+
+ def send_callback(self, method_name, *args, **kwargs):
+ self.put(
+ CallbackSend(method_name, *args, **kwargs),
+ block=False
+ )
+
+ def send_task_result(self, *args, **kwargs):
+ if isinstance(args[0], TaskResult):
+ tr = args[0]
+ else:
+ tr = TaskResult(*args, **kwargs)
+ self.put(
+ tr,
+ block=False
+ )
+
+
class TaskQueueManager:
'''
@@ -99,10 +130,12 @@ class TaskQueueManager:
self._unreachable_hosts = dict()
try:
- self._final_q = multiprocessing_context.Queue()
+ self._final_q = FinalQueue()
except OSError as e:
raise AnsibleError("Unable to use multiprocessing, this is normally caused by lack of access to /dev/shm: %s" % to_native(e))
+ self._callback_lock = threading.Lock()
+
# A temporary file (opened pre-fork) used by connection
# plugins for inter-process locking.
self._connection_lockfile = tempfile.TemporaryFile()
@@ -142,8 +175,8 @@ class TaskQueueManager:
# get all configured loadable callbacks (adjacent, builtin)
callback_list = list(callback_loader.all(class_only=True))
- # add whitelisted callbacks that refer to collections, which might not appear in normal listing
- for c in C.DEFAULT_CALLBACK_WHITELIST:
+ # add enabled callbacks that refer to collections, which might not appear in normal listing
+ for c in C.CALLBACKS_ENABLED:
# load all, as collection ones might be using short/redirected names and not a fqcn
plugin = callback_loader.get(c, class_only=True)
@@ -159,9 +192,9 @@ class TaskQueueManager:
for callback_plugin in callback_list:
callback_type = getattr(callback_plugin, 'CALLBACK_TYPE', '')
- callback_needs_whitelist = getattr(callback_plugin, 'CALLBACK_NEEDS_WHITELIST', False)
+ callback_needs_enabled = getattr(callback_plugin, 'CALLBACK_NEEDS_ENABLED', getattr(callback_plugin, 'CALLBACK_NEEDS_WHITELIST', False))
- # try to get collection world name first
+ # try to get collection world name first
cnames = getattr(callback_plugin, '_redirected_names', [])
if cnames:
# store the name the plugin was loaded as, as that's what we'll need to compare to the configured callback list later
@@ -180,10 +213,10 @@ class TaskQueueManager:
elif callback_name == 'tree' and self._run_tree:
# TODO: remove special case for tree, which is an adhoc cli option --tree
pass
- elif not self._run_additional_callbacks or (callback_needs_whitelist and (
+ elif not self._run_additional_callbacks or (callback_needs_enabled and (
# only run if not adhoc, or adhoc was specifically configured to run + check enabled list
- C.DEFAULT_CALLBACK_WHITELIST is None or callback_name not in C.DEFAULT_CALLBACK_WHITELIST)):
- # 2.x plugins shipped with ansible should require whitelisting, older or non shipped should load automatically
+ C.CALLBACKS_ENABLED is None or callback_name not in C.CALLBACKS_ENABLED)):
+ # 2.x plugins shipped with ansible should require enabling, older or non shipped should load automatically
continue
try:
@@ -219,8 +252,8 @@ class TaskQueueManager:
self.load_callbacks()
all_vars = self._variable_manager.get_vars(play=play)
- warn_if_reserved(all_vars)
templar = Templar(loader=self._loader, variables=all_vars)
+ warn_if_reserved(all_vars, templar.environment.globals.keys())
new_play = play.copy()
new_play.post_validate(templar)
@@ -278,14 +311,16 @@ class TaskQueueManager:
self._start_at_done = True
# and run the play using the strategy and cleanup on way out
- play_return = strategy.run(iterator, play_context)
+ try:
+ play_return = strategy.run(iterator, play_context)
+ finally:
+ strategy.cleanup()
+ self._cleanup_processes()
# now re-save the hosts that failed from the iterator to our internal list
for host_name in iterator.get_failed_hosts():
self._failed_hosts[host_name] = True
- strategy.cleanup()
- self._cleanup_processes()
return play_return
def cleanup(self):
@@ -357,6 +392,7 @@ class TaskQueueManager:
defunct = True
return defunct
+ @lock_decorator(attr='_callback_lock')
def send_callback(self, method_name, *args, **kwargs):
for callback_plugin in [self._stdout_callback] + self._callback_plugins:
# a plugin that set self.disabled to True will not be called
@@ -364,6 +400,10 @@ class TaskQueueManager:
if getattr(callback_plugin, 'disabled', False):
continue
+ # a plugin can opt in to implicit tasks (such as meta). It does this
+ # by declaring self.wants_implicit_tasks = True.
+ wants_implicit_tasks = getattr(callback_plugin, 'wants_implicit_tasks', False)
+
# try to find v2 method, fallback to v1 method, ignore callback if no method found
methods = []
for possible in [method_name, 'v2_on_any']:
@@ -375,6 +415,12 @@ class TaskQueueManager:
# send clean copies
new_args = []
+
+ # If we end up being given an implicit task, we'll set this flag in
+ # the loop below. If the plugin doesn't care about those, then we
+ # check and continue to the next iteration of the outer loop.
+ is_implicit_task = False
+
for arg in args:
# FIXME: add play/task cleaners
if isinstance(arg, TaskResult):
@@ -384,6 +430,12 @@ class TaskQueueManager:
else:
new_args.append(arg)
+ if isinstance(arg, Task) and arg.implicit:
+ is_implicit_task = True
+
+ if is_implicit_task and not wants_implicit_tasks:
+ continue
+
for method in methods:
try:
method(*new_args, **kwargs)
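For illustration, the lock_decorator applied to send_callback() above plausibly behaves like the sketch below, serializing callback dispatch through a per-instance threading.Lock. This is an assumption about ansible.utils.lock, not its actual source.

    import functools
    import threading

    def lock_decorator(attr='_lock'):
        # assumed shape: look the lock up on the instance by attribute name
        def outer(func):
            @functools.wraps(func)
            def inner(self, *args, **kwargs):
                with getattr(self, attr):
                    return func(self, *args, **kwargs)
            return inner
        return outer

    class _Manager:
        def __init__(self):
            self._callback_lock = threading.Lock()

        @lock_decorator(attr='_callback_lock')
        def send_callback(self, method_name, *args, **kwargs):
            # calls from the main thread and the results thread cannot interleave here
            pass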
diff --git a/lib/ansible/galaxy/api.py b/lib/ansible/galaxy/api.py
index 4dd3cded..5bd37149 100644
--- a/lib/ansible/galaxy/api.py
+++ b/lib/ansible/galaxy/api.py
@@ -5,23 +5,30 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
+import collections
+import datetime
+import functools
import hashlib
import json
import os
+import stat
import tarfile
-import uuid
import time
+import threading
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.galaxy.user_agent import user_agent
+from ansible.module_utils.api import retry_with_delays_and_condition
+from ansible.module_utils.api import generate_jittered_backoff
from ansible.module_utils.six import string_types
from ansible.module_utils.six.moves.urllib.error import HTTPError
-from ansible.module_utils.six.moves.urllib.parse import quote as urlquote, urlencode, urlparse
+from ansible.module_utils.six.moves.urllib.parse import quote as urlquote, urlencode, urlparse, parse_qs, urljoin
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.module_utils.urls import open_url, prepare_multipart
from ansible.utils.display import Display
from ansible.utils.hashing import secure_hash_s
+from ansible.utils.path import makedirs_safe
try:
from urllib.parse import urlparse
@@ -30,6 +37,27 @@ except ImportError:
from urlparse import urlparse
display = Display()
+_CACHE_LOCK = threading.Lock()
+COLLECTION_PAGE_SIZE = 100
+RETRY_HTTP_ERROR_CODES = [ # TODO: Allow user-configuration
+ 429, # Too Many Requests
+ 520, # Galaxy rate limit error code (Cloudflare unknown error)
+]
+
+
+def cache_lock(func):
+ def wrapped(*args, **kwargs):
+ with _CACHE_LOCK:
+ return func(*args, **kwargs)
+
+ return wrapped
+
+
+def is_rate_limit_exception(exception):
+ # Note: cloud.redhat.com masks rate limit errors with 403 (Forbidden) error codes.
+ # Since 403 could reflect the actual problem (such as an expired token), we should
+ # not retry by default.
+ return isinstance(exception, GalaxyError) and exception.http_code in RETRY_HTTP_ERROR_CODES
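For illustration, the two retry helpers imported above combine the way they are used on _call_galaxy() further down; the decorator arguments below mirror the ones in this patch (6 retries, base-2 exponential delay, capped at 40s, jittered), while retried_fetch is a hypothetical example function.

    from ansible.module_utils.api import generate_jittered_backoff, retry_with_delays_and_condition
    from ansible.module_utils.urls import open_url

    @retry_with_delays_and_condition(
        backoff_iterator=generate_jittered_backoff(retries=6, delay_base=2, delay_threshold=40),
        should_retry_error=is_rate_limit_exception,  # defined just above in this patch
    )
    def retried_fetch(url):
        # a GalaxyError carrying HTTP 429/520 raised here would be retried with backoff;
        # anything else propagates immediately
        return open_url(url)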
def g_connect(versions):
@@ -53,7 +81,7 @@ def g_connect(versions):
n_url = 'https://galaxy.ansible.com/api/'
try:
- data = self._call_galaxy(n_url, method='GET', error_context_msg=error_context_msg)
+ data = self._call_galaxy(n_url, method='GET', error_context_msg=error_context_msg, cache=True)
except (AnsibleError, GalaxyError, ValueError, KeyError) as err:
# Either the URL doesn't exist, or some other error. Or the URL exists, but isn't a galaxy API
# root (not JSON, no 'available_versions') so try appending '/api/'
@@ -63,7 +91,7 @@ def g_connect(versions):
# Let exceptions here bubble up but raise the original if this returns a 404 (/api/ wasn't found).
n_url = _urljoin(n_url, '/api/')
try:
- data = self._call_galaxy(n_url, method='GET', error_context_msg=error_context_msg)
+ data = self._call_galaxy(n_url, method='GET', error_context_msg=error_context_msg, cache=True)
except GalaxyError as new_err:
if new_err.http_code == 404:
raise err
@@ -100,6 +128,55 @@ def g_connect(versions):
return decorator
+def get_cache_id(url):
+ """ Gets the cache ID for the URL specified. """
+ url_info = urlparse(url)
+
+ port = None
+ try:
+ port = url_info.port
+ except ValueError:
+ pass # While the URL is probably invalid, let the caller figure that out when using it
+
+ # Cannot use netloc because it could contain credentials if the server specified had them in there.
+ return '%s:%s' % (url_info.hostname, port or '')
+
+
+@cache_lock
+def _load_cache(b_cache_path):
+ """ Loads the cache file requested if possible. The file must not be world writable. """
+ cache_version = 1
+
+ if not os.path.isfile(b_cache_path):
+ display.vvvv("Creating Galaxy API response cache file at '%s'" % to_text(b_cache_path))
+ with open(b_cache_path, 'w'):
+ os.chmod(b_cache_path, 0o600)
+
+ cache_mode = os.stat(b_cache_path).st_mode
+ if cache_mode & stat.S_IWOTH:
+ display.warning("Galaxy cache has world writable access (%s), ignoring it as a cache source."
+ % to_text(b_cache_path))
+ return
+
+ with open(b_cache_path, mode='rb') as fd:
+ json_val = to_text(fd.read(), errors='surrogate_or_strict')
+
+ try:
+ cache = json.loads(json_val)
+ except ValueError:
+ cache = None
+
+ if not isinstance(cache, dict) or cache.get('version', None) != cache_version:
+ display.vvvv("Galaxy cache file at '%s' has an invalid version, clearing" % to_text(b_cache_path))
+ cache = {'version': cache_version}
+
+ # Set the cache after we've cleared the existing entries
+ with open(b_cache_path, mode='wb') as fd:
+ fd.write(to_bytes(json.dumps(cache), errors='surrogate_or_strict'))
+
+ return cache
+
+
def _urljoin(*args):
return '/'.join(to_native(a, errors='surrogate_or_strict').strip('/') for a in args + ('',) if a)
@@ -144,6 +221,11 @@ class GalaxyError(AnsibleError):
self.message = to_native(full_error_msg)
+# Keep the raw string results for the date. It's too complex to parse as a datetime object and the various APIs return
+# them in different formats.
+CollectionMetadata = collections.namedtuple('CollectionMetadata', ['namespace', 'name', 'created_str', 'modified_str'])
+
+
class CollectionVersionMetadata:
def __init__(self, namespace, name, version, download_url, artifact_sha256, dependencies):
@@ -166,10 +248,17 @@ class CollectionVersionMetadata:
self.dependencies = dependencies
+@functools.total_ordering
class GalaxyAPI:
""" This class is meant to be used as a API client for an Ansible Galaxy server """
- def __init__(self, galaxy, name, url, username=None, password=None, token=None, validate_certs=True):
+ def __init__(
+ self, galaxy, name, url,
+ username=None, password=None, token=None, validate_certs=True,
+ available_api_versions=None,
+ clear_response_cache=False, no_cache=True,
+ priority=float('inf'),
+ ):
self.galaxy = galaxy
self.name = name
self.username = username
@@ -177,17 +266,111 @@ class GalaxyAPI:
self.token = token
self.api_server = url
self.validate_certs = validate_certs
- self._available_api_versions = {}
+ self._available_api_versions = available_api_versions or {}
+ self._priority = priority
+
+ b_cache_dir = to_bytes(C.config.get_config_value('GALAXY_CACHE_DIR'), errors='surrogate_or_strict')
+ makedirs_safe(b_cache_dir, mode=0o700)
+ self._b_cache_path = os.path.join(b_cache_dir, b'api.json')
+
+ if clear_response_cache:
+ with _CACHE_LOCK:
+ if os.path.exists(self._b_cache_path):
+ display.vvvv("Clearing cache file (%s)" % to_text(self._b_cache_path))
+ os.remove(self._b_cache_path)
+
+ self._cache = None
+ if not no_cache:
+ self._cache = _load_cache(self._b_cache_path)
display.debug('Validate TLS certificates for %s: %s' % (self.api_server, self.validate_certs))
+ def __str__(self):
+ # type: (GalaxyAPI) -> str
+ """Render GalaxyAPI as a native string representation."""
+ return to_native(self.name)
+
+ def __unicode__(self):
+ # type: (GalaxyAPI) -> unicode
+ """Render GalaxyAPI as a unicode/text string representation."""
+ return to_text(self.name)
+
+ def __repr__(self):
+ # type: (GalaxyAPI) -> str
+ """Render GalaxyAPI as an inspectable string representation."""
+ return (
+ '<{instance!s} "{name!s}" @ {url!s} with priority {priority!s}>'.
+ format(
+ instance=self, name=self.name,
+ priority=self._priority, url=self.api_server,
+ )
+ )
+
+ def __lt__(self, other_galaxy_api):
+ # type: (GalaxyAPI, GalaxyAPI) -> Union[bool, 'NotImplemented']
+ """Return whether the instance priority is higher than other."""
+ if not isinstance(other_galaxy_api, self.__class__):
+ return NotImplemented
+
+ return (
+ self._priority > other_galaxy_api._priority or
+ self.name < other_galaxy_api.name
+ )
+
@property
@g_connect(['v1', 'v2', 'v3'])
def available_api_versions(self):
# Calling g_connect will populate self._available_api_versions
return self._available_api_versions
- def _call_galaxy(self, url, args=None, headers=None, method=None, auth_required=False, error_context_msg=None):
+ @retry_with_delays_and_condition(
+ backoff_iterator=generate_jittered_backoff(retries=6, delay_base=2, delay_threshold=40),
+ should_retry_error=is_rate_limit_exception
+ )
+ def _call_galaxy(self, url, args=None, headers=None, method=None, auth_required=False, error_context_msg=None,
+ cache=False):
+ url_info = urlparse(url)
+ cache_id = get_cache_id(url)
+ query = parse_qs(url_info.query)
+ if cache and self._cache:
+ server_cache = self._cache.setdefault(cache_id, {})
+ iso_datetime_format = '%Y-%m-%dT%H:%M:%SZ'
+
+ valid = False
+ if url_info.path in server_cache:
+ expires = datetime.datetime.strptime(server_cache[url_info.path]['expires'], iso_datetime_format)
+ valid = datetime.datetime.utcnow() < expires
+
+ is_paginated_url = 'page' in query or 'offset' in query
+ if valid and not is_paginated_url:
+ # Got a hit on the cache and we aren't getting a paginated response
+ path_cache = server_cache[url_info.path]
+ if path_cache.get('paginated'):
+ if '/v3/' in url_info.path:
+ res = {'links': {'next': None}}
+ else:
+ res = {'next': None}
+
+ # Technically some v3 paginated APIs return in 'data' but the caller checks the keys for this so
+ # always returning the cache under results is fine.
+ res['results'] = []
+ for result in path_cache['results']:
+ res['results'].append(result)
+
+ else:
+ res = path_cache['results']
+
+ return res
+
+ elif not is_paginated_url:
+ # The cache entry had expired or does not exist, start a new blank entry to be filled later.
+ expires = datetime.datetime.utcnow()
+ expires += datetime.timedelta(days=1)
+ server_cache[url_info.path] = {
+ 'expires': expires.strftime(iso_datetime_format),
+ 'paginated': False,
+ }
+
headers = headers or {}
self._add_auth_token(headers, url, required=auth_required)
@@ -207,6 +390,25 @@ class GalaxyAPI:
raise AnsibleError("Failed to parse Galaxy response from '%s' as JSON:\n%s"
% (resp.url, to_native(resp_data)))
+ if cache and self._cache:
+ path_cache = self._cache[cache_id][url_info.path]
+
+ # v3 can return data or results for paginated results. Scan the result so we can determine what to cache.
+ paginated_key = None
+ for key in ['data', 'results']:
+ if key in data:
+ paginated_key = key
+ break
+
+ if paginated_key:
+ path_cache['paginated'] = True
+ results = path_cache.setdefault('results', [])
+ for result in data[paginated_key]:
+ results.append(result)
+
+ else:
+ path_cache['results'] = data
+
return data
def _add_auth_token(self, headers, url, token_type=None, required=False):
@@ -221,6 +423,11 @@ class GalaxyAPI:
if self.token:
headers.update(self.token.headers())
+ @cache_lock
+ def _set_cache(self):
+ with open(self._b_cache_path, mode='wb') as fd:
+ fd.write(to_bytes(json.dumps(self._cache), errors='surrogate_or_strict'))
+
@g_connect(['v1'])
def authenticate(self, github_token):
"""
@@ -479,8 +686,16 @@ class GalaxyAPI:
wait = 2
while timeout == 0 or (time.time() - start) < timeout:
- data = self._call_galaxy(full_url, method='GET', auth_required=True,
- error_context_msg='Error when getting import task results at %s' % full_url)
+ try:
+ data = self._call_galaxy(full_url, method='GET', auth_required=True,
+ error_context_msg='Error when getting import task results at %s' % full_url)
+ except GalaxyError as e:
+ if e.http_code != 404:
+ raise
+ # The import job may not have started, and as such, the task url may not yet exist
+ display.vvv('Galaxy import process has not started, wait %s seconds before trying again' % wait)
+ time.sleep(wait)
+ continue
state = data.get('state', 'waiting')
@@ -513,6 +728,39 @@ class GalaxyAPI:
raise AnsibleError("Galaxy import process failed: %s (Code: %s)" % (description, code))
@g_connect(['v2', 'v3'])
+ def get_collection_metadata(self, namespace, name):
+ """
+ Gets the collection information from the Galaxy server about a specific Collection.
+
+ :param namespace: The collection namespace.
+ :param name: The collection name.
+ :return: CollectionMetadata about the collection.
+ """
+ if 'v3' in self.available_api_versions:
+ api_path = self.available_api_versions['v3']
+ field_map = [
+ ('created_str', 'created_at'),
+ ('modified_str', 'updated_at'),
+ ]
+ else:
+ api_path = self.available_api_versions['v2']
+ field_map = [
+ ('created_str', 'created'),
+ ('modified_str', 'modified'),
+ ]
+
+ info_url = _urljoin(self.api_server, api_path, 'collections', namespace, name, '/')
+ error_context_msg = 'Error when getting the collection info for %s.%s from %s (%s)' \
+ % (namespace, name, self.name, self.api_server)
+ data = self._call_galaxy(info_url, error_context_msg=error_context_msg)
+
+ metadata = {}
+ for name, api_field in field_map:
+ metadata[name] = data.get(api_field, None)
+
+ return CollectionMetadata(namespace, name, **metadata)
+
+ @g_connect(['v2', 'v3'])
def get_collection_version_metadata(self, namespace, name, version):
"""
Gets the collection information from the Galaxy server about a specific Collection version.
@@ -528,7 +776,8 @@ class GalaxyAPI:
n_collection_url = _urljoin(*url_paths)
error_context_msg = 'Error when getting collection version metadata for %s.%s:%s from %s (%s)' \
% (namespace, name, version, self.name, self.api_server)
- data = self._call_galaxy(n_collection_url, error_context_msg=error_context_msg)
+ data = self._call_galaxy(n_collection_url, error_context_msg=error_context_msg, cache=True)
+ self._set_cache()
return CollectionVersionMetadata(data['namespace']['name'], data['collection']['name'], data['version'],
data['download_url'], data['artifact']['sha256'],
@@ -546,19 +795,56 @@ class GalaxyAPI:
relative_link = False
if 'v3' in self.available_api_versions:
api_path = self.available_api_versions['v3']
- results_key = 'data'
pagination_path = ['links', 'next']
relative_link = True # AH pagination results are relative and not an absolute URI.
else:
api_path = self.available_api_versions['v2']
- results_key = 'results'
pagination_path = ['next']
- n_url = _urljoin(self.api_server, api_path, 'collections', namespace, name, 'versions', '/')
+ page_size_name = 'limit' if 'v3' in self.available_api_versions else 'page_size'
+ versions_url = _urljoin(self.api_server, api_path, 'collections', namespace, name, 'versions', '/?%s=%d' % (page_size_name, COLLECTION_PAGE_SIZE))
+ versions_url_info = urlparse(versions_url)
+
+ # We should only rely on the cache if the collection has not changed. This may slow things down but it ensures
+ # we are not waiting a day before finding any new collections that have been published.
+ if self._cache:
+ server_cache = self._cache.setdefault(get_cache_id(versions_url), {})
+ modified_cache = server_cache.setdefault('modified', {})
+
+ try:
+ modified_date = self.get_collection_metadata(namespace, name).modified_str
+ except GalaxyError as err:
+ if err.http_code != 404:
+ raise
+ # No collection found, return an empty list to keep things consistent with the various APIs
+ return []
+
+ cached_modified_date = modified_cache.get('%s.%s' % (namespace, name), None)
+ if cached_modified_date != modified_date:
+ modified_cache['%s.%s' % (namespace, name)] = modified_date
+ if versions_url_info.path in server_cache:
+ del server_cache[versions_url_info.path]
+
+ self._set_cache()
error_context_msg = 'Error when getting available collection versions for %s.%s from %s (%s)' \
% (namespace, name, self.name, self.api_server)
- data = self._call_galaxy(n_url, error_context_msg=error_context_msg)
+
+ try:
+ data = self._call_galaxy(versions_url, error_context_msg=error_context_msg, cache=True)
+ except GalaxyError as err:
+ if err.http_code != 404:
+ raise
+ # v3 doesn't raise a 404 so we need to mimic the empty response from APIs that do.
+ return []
+
+ if 'data' in data:
+ # v3 automation-hub is the only known API that uses `data`
+ # since v3 pulp_ansible does not, we cannot rely on version
+ # to indicate which key to use
+ results_key = 'data'
+ else:
+ results_key = 'results'
versions = []
while True:
@@ -573,9 +859,13 @@ class GalaxyAPI:
elif relative_link:
# TODO: This assumes the pagination result is relative to the root server. Will need to be verified
# with someone who knows the AH API.
- next_link = n_url.replace(urlparse(n_url).path, next_link)
+
+ # Remove the query string from the versions_url to use the next_link's query
+ versions_url = urljoin(versions_url, urlparse(versions_url).path)
+ next_link = versions_url.replace(versions_url_info.path, next_link)
data = self._call_galaxy(to_native(next_link, errors='surrogate_or_strict'),
- error_context_msg=error_context_msg)
+ error_context_msg=error_context_msg, cache=True)
+ self._set_cache()
return versions
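
The cache handling added above hinges on one rule: a cached versions page is only trusted while the collection's modified timestamp is unchanged. A minimal standalone sketch of that invalidation step, assuming a plain dict cache shaped like the server cache used above (the helper name is hypothetical, not part of GalaxyAPI):

def invalidate_versions_cache(server_cache, namespace, name, current_modified, versions_path):
    # 'modified' maps "namespace.name" to the last modified timestamp we saw.
    modified_cache = server_cache.setdefault('modified', {})
    key = '%s.%s' % (namespace, name)
    if modified_cache.get(key) != current_modified:
        # The collection changed upstream: record the new timestamp and drop
        # the cached versions page so the next request is fetched fresh.
        modified_cache[key] = current_modified
        server_cache.pop(versions_path, None)
        return True
    return False

# First call with a new timestamp clears the stale page; a repeat call with the
# same timestamp leaves the cache alone.
cache = {'/api/v2/collections/ns/col/versions/': {'results': []}}
invalidate_versions_cache(cache, 'ns', 'col', '2021-01-01T00:00:00Z', '/api/v2/collections/ns/col/versions/')
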
diff --git a/lib/ansible/galaxy/collection.py b/lib/ansible/galaxy/collection.py
deleted file mode 100644
index 054a8a57..00000000
--- a/lib/ansible/galaxy/collection.py
+++ /dev/null
@@ -1,1551 +0,0 @@
-# Copyright: (c) 2019, Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import errno
-import fnmatch
-import json
-import operator
-import os
-import shutil
-import stat
-import sys
-import tarfile
-import tempfile
-import threading
-import time
-import yaml
-
-from collections import namedtuple
-from contextlib import contextmanager
-from distutils.version import LooseVersion
-from hashlib import sha256
-from io import BytesIO
-from yaml.error import YAMLError
-
-try:
- import queue
-except ImportError:
- import Queue as queue # Python 2
-
-import ansible.constants as C
-from ansible.errors import AnsibleError
-from ansible.galaxy import get_collections_galaxy_meta_info
-from ansible.galaxy.api import CollectionVersionMetadata, GalaxyError
-from ansible.galaxy.user_agent import user_agent
-from ansible.module_utils import six
-from ansible.module_utils._text import to_bytes, to_native, to_text
-from ansible.utils.collection_loader import AnsibleCollectionRef
-from ansible.utils.display import Display
-from ansible.utils.galaxy import scm_archive_collection
-from ansible.utils.hashing import secure_hash, secure_hash_s
-from ansible.utils.version import SemanticVersion
-from ansible.module_utils.urls import open_url
-
-urlparse = six.moves.urllib.parse.urlparse
-urldefrag = six.moves.urllib.parse.urldefrag
-urllib_error = six.moves.urllib.error
-
-
-display = Display()
-
-MANIFEST_FORMAT = 1
-
-ModifiedContent = namedtuple('ModifiedContent', ['filename', 'expected', 'installed'])
-
-
-class CollectionRequirement:
-
- _FILE_MAPPING = [(b'MANIFEST.json', 'manifest_file'), (b'FILES.json', 'files_file')]
-
- def __init__(self, namespace, name, b_path, api, versions, requirement, force, parent=None, metadata=None,
- files=None, skip=False, allow_pre_releases=False):
- """Represents a collection requirement, the versions that are available to be installed as well as any
- dependencies the collection has.
-
- :param namespace: The collection namespace.
- :param name: The collection name.
- :param b_path: Byte str of the path to the collection tarball if it has already been downloaded.
- :param api: The GalaxyAPI to use if the collection is from Galaxy.
- :param versions: A list of versions of the collection that are available.
- :param requirement: The version requirement string used to verify the list of versions fit the requirements.
- :param force: Whether the force flag was applied to the collection.
- :param parent: The name of the parent the collection is a dependency of.
- :param metadata: The galaxy.api.CollectionVersionMetadata that has already been retrieved from the Galaxy
- server.
- :param files: The files that exist inside the collection. This is based on the FILES.json file inside the
- collection artifact.
- :param skip: Whether to skip installing the collection. Should be set if the collection is already installed
- and force is not set.
- :param allow_pre_releases: Whether to allow pre-release versions of collections.
- """
- self.namespace = namespace
- self.name = name
- self.b_path = b_path
- self.api = api
- self._versions = set(versions)
- self.force = force
- self.skip = skip
- self.required_by = []
- self.allow_pre_releases = allow_pre_releases
-
- self._metadata = metadata
- self._files = files
-
- self.add_requirement(parent, requirement)
-
- def __str__(self):
- return to_native("%s.%s" % (self.namespace, self.name))
-
- def __unicode__(self):
- return u"%s.%s" % (self.namespace, self.name)
-
- @property
- def metadata(self):
- self._get_metadata()
- return self._metadata
-
- @property
- def versions(self):
- if self.allow_pre_releases:
- return self._versions
- return set(v for v in self._versions if v == '*' or not SemanticVersion(v).is_prerelease)
-
- @versions.setter
- def versions(self, value):
- self._versions = set(value)
-
- @property
- def pre_releases(self):
- return set(v for v in self._versions if SemanticVersion(v).is_prerelease)
-
- @property
- def latest_version(self):
- try:
- return max([v for v in self.versions if v != '*'], key=SemanticVersion)
- except ValueError: # ValueError: max() arg is an empty sequence
- return '*'
-
- @property
- def dependencies(self):
- if not self._metadata:
- if len(self.versions) > 1:
- return {}
- self._get_metadata()
-
- dependencies = self._metadata.dependencies
-
- if dependencies is None:
- return {}
-
- return dependencies
-
- @staticmethod
- def artifact_info(b_path):
- """Load the manifest data from the MANIFEST.json and FILES.json. If the files exist, return a dict containing the keys 'files_file' and 'manifest_file'.
- :param b_path: The directory of a collection.
- """
- info = {}
- for b_file_name, property_name in CollectionRequirement._FILE_MAPPING:
- b_file_path = os.path.join(b_path, b_file_name)
- if not os.path.exists(b_file_path):
- continue
- with open(b_file_path, 'rb') as file_obj:
- try:
- info[property_name] = json.loads(to_text(file_obj.read(), errors='surrogate_or_strict'))
- except ValueError:
- raise AnsibleError("Collection file at '%s' does not contain a valid json string." % to_native(b_file_path))
- return info
-
- @staticmethod
- def galaxy_metadata(b_path):
- """Generate the manifest data from the galaxy.yml file.
- If the galaxy.yml exists, return a dictionary containing the keys 'files_file' and 'manifest_file'.
-
- :param b_path: The directory of a collection.
- """
- b_galaxy_path = get_galaxy_metadata_path(b_path)
- info = {}
- if os.path.exists(b_galaxy_path):
- collection_meta = _get_galaxy_yml(b_galaxy_path)
- info['files_file'] = _build_files_manifest(b_path, collection_meta['namespace'], collection_meta['name'], collection_meta['build_ignore'])
- info['manifest_file'] = _build_manifest(**collection_meta)
- return info
-
- @staticmethod
- def collection_info(b_path, fallback_metadata=False):
- info = CollectionRequirement.artifact_info(b_path)
- if info or not fallback_metadata:
- return info
- return CollectionRequirement.galaxy_metadata(b_path)
-
- def add_requirement(self, parent, requirement):
- self.required_by.append((parent, requirement))
- new_versions = set(v for v in self.versions if self._meets_requirements(v, requirement, parent))
- if len(new_versions) == 0:
- if self.skip:
- force_flag = '--force-with-deps' if parent else '--force'
- version = self.latest_version if self.latest_version != '*' else 'unknown'
- msg = "Cannot meet requirement %s:%s as it is already installed at version '%s'. Use %s to overwrite" \
- % (to_text(self), requirement, version, force_flag)
- raise AnsibleError(msg)
- elif parent is None:
- msg = "Cannot meet requirement %s for dependency %s" % (requirement, to_text(self))
- else:
- msg = "Cannot meet dependency requirement '%s:%s' for collection %s" \
- % (to_text(self), requirement, parent)
-
- collection_source = to_text(self.b_path, nonstring='passthru') or self.api.api_server
- req_by = "\n".join(
- "\t%s - '%s:%s'" % (to_text(p) if p else 'base', to_text(self), r)
- for p, r in self.required_by
- )
-
- versions = ", ".join(sorted(self.versions, key=SemanticVersion))
- if not self.versions and self.pre_releases:
- pre_release_msg = (
- '\nThis collection only contains pre-releases. Utilize `--pre` to install pre-releases, or '
- 'explicitly provide the pre-release version.'
- )
- else:
- pre_release_msg = ''
-
- raise AnsibleError(
- "%s from source '%s'. Available versions before last requirement added: %s\nRequirements from:\n%s%s"
- % (msg, collection_source, versions, req_by, pre_release_msg)
- )
-
- self.versions = new_versions
-
- def download(self, b_path):
- download_url = self._metadata.download_url
- artifact_hash = self._metadata.artifact_sha256
- headers = {}
- self.api._add_auth_token(headers, download_url, required=False)
-
- b_collection_path = _download_file(download_url, b_path, artifact_hash, self.api.validate_certs,
- headers=headers)
-
- return to_text(b_collection_path, errors='surrogate_or_strict')
-
- def install(self, path, b_temp_path):
- if self.skip:
- display.display("Skipping '%s' as it is already installed" % to_text(self))
- return
-
- # Install if it is not
- collection_path = os.path.join(path, self.namespace, self.name)
- b_collection_path = to_bytes(collection_path, errors='surrogate_or_strict')
- display.display("Installing '%s:%s' to '%s'" % (to_text(self), self.latest_version, collection_path))
-
- if self.b_path is None:
- self.b_path = self.download(b_temp_path)
-
- if os.path.exists(b_collection_path):
- shutil.rmtree(b_collection_path)
-
- if os.path.isfile(self.b_path):
- self.install_artifact(b_collection_path, b_temp_path)
- else:
- self.install_scm(b_collection_path)
-
- display.display("%s (%s) was installed successfully" % (to_text(self), self.latest_version))
-
- def install_artifact(self, b_collection_path, b_temp_path):
-
- try:
- with tarfile.open(self.b_path, mode='r') as collection_tar:
- files_member_obj = collection_tar.getmember('FILES.json')
- with _tarfile_extract(collection_tar, files_member_obj) as (dummy, files_obj):
- files = json.loads(to_text(files_obj.read(), errors='surrogate_or_strict'))
-
- _extract_tar_file(collection_tar, 'MANIFEST.json', b_collection_path, b_temp_path)
- _extract_tar_file(collection_tar, 'FILES.json', b_collection_path, b_temp_path)
-
- for file_info in files['files']:
- file_name = file_info['name']
- if file_name == '.':
- continue
-
- if file_info['ftype'] == 'file':
- _extract_tar_file(collection_tar, file_name, b_collection_path, b_temp_path,
- expected_hash=file_info['chksum_sha256'])
-
- else:
- _extract_tar_dir(collection_tar, file_name, b_collection_path)
-
- except Exception:
- # Ensure we don't leave the dir behind in case of a failure.
- shutil.rmtree(b_collection_path)
-
- b_namespace_path = os.path.dirname(b_collection_path)
- if not os.listdir(b_namespace_path):
- os.rmdir(b_namespace_path)
-
- raise
-
- def install_scm(self, b_collection_output_path):
- """Install the collection from source control into given dir.
-
- Generates the Ansible collection artifact data from a galaxy.yml and installs the artifact to a directory.
- This should follow the same pattern as build_collection, but instead of creating an artifact, it installs the collection.
- :param b_collection_output_path: The installation directory for the collection artifact.
- :raises AnsibleError: If no collection metadata found.
- """
- b_collection_path = self.b_path
-
- b_galaxy_path = get_galaxy_metadata_path(b_collection_path)
- if not os.path.exists(b_galaxy_path):
- raise AnsibleError("The collection galaxy.yml path '%s' does not exist." % to_native(b_galaxy_path))
-
- info = CollectionRequirement.galaxy_metadata(b_collection_path)
-
- collection_manifest = info['manifest_file']
- collection_meta = collection_manifest['collection_info']
- file_manifest = info['files_file']
-
- _build_collection_dir(b_collection_path, b_collection_output_path, collection_manifest, file_manifest)
-
- collection_name = "%s.%s" % (collection_manifest['collection_info']['namespace'],
- collection_manifest['collection_info']['name'])
- display.display('Created collection for %s at %s' % (collection_name, to_text(b_collection_output_path)))
-
- def set_latest_version(self):
- self.versions = set([self.latest_version])
- self._get_metadata()
-
- def verify(self, remote_collection, path, b_temp_tar_path):
- if not self.skip:
- display.display("'%s' has not been installed, nothing to verify" % (to_text(self)))
- return
-
- collection_path = os.path.join(path, self.namespace, self.name)
- b_collection_path = to_bytes(collection_path, errors='surrogate_or_strict')
-
- display.vvv("Verifying '%s:%s'." % (to_text(self), self.latest_version))
- display.vvv("Installed collection found at '%s'" % collection_path)
- display.vvv("Remote collection found at '%s'" % remote_collection.metadata.download_url)
-
- # Compare installed version versus requirement version
- if self.latest_version != remote_collection.latest_version:
- err = "%s has the version '%s' but is being compared to '%s'" % (to_text(self), self.latest_version, remote_collection.latest_version)
- display.display(err)
- return
-
- modified_content = []
-
- # Verify the manifest hash matches before verifying the file manifest
- expected_hash = _get_tar_file_hash(b_temp_tar_path, 'MANIFEST.json')
- self._verify_file_hash(b_collection_path, 'MANIFEST.json', expected_hash, modified_content)
- manifest = _get_json_from_tar_file(b_temp_tar_path, 'MANIFEST.json')
-
- # Use the manifest to verify the file manifest checksum
- file_manifest_data = manifest['file_manifest_file']
- file_manifest_filename = file_manifest_data['name']
- expected_hash = file_manifest_data['chksum_%s' % file_manifest_data['chksum_type']]
-
- # Verify the file manifest before using it to verify individual files
- self._verify_file_hash(b_collection_path, file_manifest_filename, expected_hash, modified_content)
- file_manifest = _get_json_from_tar_file(b_temp_tar_path, file_manifest_filename)
-
- # Use the file manifest to verify individual file checksums
- for manifest_data in file_manifest['files']:
- if manifest_data['ftype'] == 'file':
- expected_hash = manifest_data['chksum_%s' % manifest_data['chksum_type']]
- self._verify_file_hash(b_collection_path, manifest_data['name'], expected_hash, modified_content)
-
- if modified_content:
- display.display("Collection %s contains modified content in the following files:" % to_text(self))
- display.display(to_text(self))
- display.vvv(to_text(self.b_path))
- for content_change in modified_content:
- display.display(' %s' % content_change.filename)
- display.vvv(" Expected: %s\n Found: %s" % (content_change.expected, content_change.installed))
- else:
- display.vvv("Successfully verified that checksums for '%s:%s' match the remote collection" % (to_text(self), self.latest_version))
-
- def _verify_file_hash(self, b_path, filename, expected_hash, error_queue):
- b_file_path = to_bytes(os.path.join(to_text(b_path), filename), errors='surrogate_or_strict')
-
- if not os.path.isfile(b_file_path):
- actual_hash = None
- else:
- with open(b_file_path, mode='rb') as file_object:
- actual_hash = _consume_file(file_object)
-
- if expected_hash != actual_hash:
- error_queue.append(ModifiedContent(filename=filename, expected=expected_hash, installed=actual_hash))
-
- def _get_metadata(self):
- if self._metadata:
- return
- self._metadata = self.api.get_collection_version_metadata(self.namespace, self.name, self.latest_version)
-
- def _meets_requirements(self, version, requirements, parent):
- """
- Supported version identifiers are '==', '!=', '>', '>=', '<', '<=', and '*'. Multiple requirements are delimited by ','
- """
- op_map = {
- '!=': operator.ne,
- '==': operator.eq,
- '=': operator.eq,
- '>=': operator.ge,
- '>': operator.gt,
- '<=': operator.le,
- '<': operator.lt,
- }
-
- for req in list(requirements.split(',')):
- op_pos = 2 if len(req) > 1 and req[1] == '=' else 1
- op = op_map.get(req[:op_pos])
-
- requirement = req[op_pos:]
- if not op:
- requirement = req
- op = operator.eq
-
- # In the case we are checking a new requirement on a base requirement (parent != None) we can't accept
- # version as '*' (unknown version) unless the requirement is also '*'.
- if parent and version == '*' and requirement != '*':
- display.warning("Failed to validate the collection requirement '%s:%s' for %s when the existing "
- "install does not have a version set, the collection may not work."
- % (to_text(self), req, parent))
- continue
- elif requirement == '*' or version == '*':
- continue
-
- if not op(SemanticVersion(version), SemanticVersion.from_loose_version(LooseVersion(requirement))):
- break
- else:
- return True
-
- # The loop was broken early, it does not meet all the requirements
- return False
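
To make the operator handling above concrete, here is a small self-contained sketch of the same parsing rules, simplified to dotted integer versions instead of full semantic versions (the helper name is hypothetical):

import operator

OP_MAP = {'!=': operator.ne, '==': operator.eq, '=': operator.eq,
          '>=': operator.ge, '>': operator.gt, '<=': operator.le, '<': operator.lt}

def meets_requirements(version, requirements):
    # Simplified: compare dotted integer versions as tuples.
    parse = lambda v: tuple(int(part) for part in v.split('.'))
    for req in requirements.split(','):
        op_pos = 2 if len(req) > 1 and req[1] == '=' else 1
        op = OP_MAP.get(req[:op_pos])
        wanted = req[op_pos:] if op else req
        op = op or operator.eq
        if wanted == '*' or version == '*':
            continue
        if not op(parse(version), parse(wanted)):
            return False
    return True

# Every comma-separated clause must hold.
assert meets_requirements('1.5.0', '>=1.0.0,<2.0.0')
assert not meets_requirements('1.5.0', '>=1.0.0,!=1.5.0')
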
-
- @staticmethod
- def from_tar(b_path, force, parent=None):
- if not tarfile.is_tarfile(b_path):
- raise AnsibleError("Collection artifact at '%s' is not a valid tar file." % to_native(b_path))
-
- info = {}
- with tarfile.open(b_path, mode='r') as collection_tar:
- for b_member_name, property_name in CollectionRequirement._FILE_MAPPING:
- n_member_name = to_native(b_member_name)
- try:
- member = collection_tar.getmember(n_member_name)
- except KeyError:
- raise AnsibleError("Collection at '%s' does not contain the required file %s."
- % (to_native(b_path), n_member_name))
-
- with _tarfile_extract(collection_tar, member) as (dummy, member_obj):
- try:
- info[property_name] = json.loads(to_text(member_obj.read(), errors='surrogate_or_strict'))
- except ValueError:
- raise AnsibleError("Collection tar file member %s does not contain a valid json string."
- % n_member_name)
-
- meta = info['manifest_file']['collection_info']
- files = info['files_file']['files']
-
- namespace = meta['namespace']
- name = meta['name']
- version = meta['version']
- meta = CollectionVersionMetadata(namespace, name, version, None, None, meta['dependencies'])
-
- if SemanticVersion(version).is_prerelease:
- allow_pre_release = True
- else:
- allow_pre_release = False
-
- return CollectionRequirement(namespace, name, b_path, None, [version], version, force, parent=parent,
- metadata=meta, files=files, allow_pre_releases=allow_pre_release)
-
- @staticmethod
- def from_path(b_path, force, parent=None, fallback_metadata=False, skip=True):
- info = CollectionRequirement.collection_info(b_path, fallback_metadata)
-
- allow_pre_release = False
- if 'manifest_file' in info:
- manifest = info['manifest_file']['collection_info']
- namespace = manifest['namespace']
- name = manifest['name']
- version = to_text(manifest['version'], errors='surrogate_or_strict')
-
- try:
- _v = SemanticVersion()
- _v.parse(version)
- if _v.is_prerelease:
- allow_pre_release = True
- except ValueError:
- display.warning("Collection at '%s' does not have a valid version set, falling back to '*'. Found "
- "version: '%s'" % (to_text(b_path), version))
- version = '*'
-
- dependencies = manifest['dependencies']
- else:
- if fallback_metadata:
- warning = "Collection at '%s' does not have a galaxy.yml or a MANIFEST.json file, cannot detect version."
- else:
- warning = "Collection at '%s' does not have a MANIFEST.json file, cannot detect version."
- display.warning(warning % to_text(b_path))
- parent_dir, name = os.path.split(to_text(b_path, errors='surrogate_or_strict'))
- namespace = os.path.split(parent_dir)[1]
-
- version = '*'
- dependencies = {}
-
- meta = CollectionVersionMetadata(namespace, name, version, None, None, dependencies)
-
- files = info.get('files_file', {}).get('files', {})
-
- return CollectionRequirement(namespace, name, b_path, None, [version], version, force, parent=parent,
- metadata=meta, files=files, skip=skip, allow_pre_releases=allow_pre_release)
-
- @staticmethod
- def from_name(collection, apis, requirement, force, parent=None, allow_pre_release=False):
- namespace, name = collection.split('.', 1)
- galaxy_meta = None
-
- for api in apis:
- try:
- if not (requirement == '*' or requirement.startswith('<') or requirement.startswith('>') or
- requirement.startswith('!=')):
- # Exact requirement
- allow_pre_release = True
-
- if requirement.startswith('='):
- requirement = requirement.lstrip('=')
-
- resp = api.get_collection_version_metadata(namespace, name, requirement)
-
- galaxy_meta = resp
- versions = [resp.version]
- else:
- versions = api.get_collection_versions(namespace, name)
- except GalaxyError as err:
- if err.http_code != 404:
- raise
-
- versions = []
-
- # Automation Hub doesn't return a 404 but an empty version list so we check that to align both AH and
- # Galaxy when the collection is not available on that server.
- if not versions:
- display.vvv("Collection '%s' is not available from server %s %s" % (collection, api.name,
- api.api_server))
- continue
-
- display.vvv("Collection '%s' obtained from server %s %s" % (collection, api.name, api.api_server))
- break
- else:
- raise AnsibleError("Failed to find collection %s:%s" % (collection, requirement))
-
- req = CollectionRequirement(namespace, name, None, api, versions, requirement, force, parent=parent,
- metadata=galaxy_meta, allow_pre_releases=allow_pre_release)
- return req
-
-
-def build_collection(collection_path, output_path, force):
- """Creates the Ansible collection artifact in a .tar.gz file.
-
- :param collection_path: The path to the collection to build. This should be the directory that contains the
- galaxy.yml file.
- :param output_path: The path to create the collection build artifact. This should be a directory.
- :param force: Whether to overwrite an existing collection build artifact or fail.
- :return: The path to the collection build artifact.
- """
- b_collection_path = to_bytes(collection_path, errors='surrogate_or_strict')
- b_galaxy_path = get_galaxy_metadata_path(b_collection_path)
- if not os.path.exists(b_galaxy_path):
- raise AnsibleError("The collection galaxy.yml path '%s' does not exist." % to_native(b_galaxy_path))
-
- info = CollectionRequirement.galaxy_metadata(b_collection_path)
-
- collection_manifest = info['manifest_file']
- collection_meta = collection_manifest['collection_info']
- file_manifest = info['files_file']
-
- collection_output = os.path.join(output_path, "%s-%s-%s.tar.gz" % (collection_meta['namespace'],
- collection_meta['name'],
- collection_meta['version']))
-
- b_collection_output = to_bytes(collection_output, errors='surrogate_or_strict')
- if os.path.exists(b_collection_output):
- if os.path.isdir(b_collection_output):
- raise AnsibleError("The output collection artifact '%s' already exists, "
- "but is a directory - aborting" % to_native(collection_output))
- elif not force:
- raise AnsibleError("The file '%s' already exists. You can use --force to re-create "
- "the collection artifact." % to_native(collection_output))
-
- _build_collection_tar(b_collection_path, b_collection_output, collection_manifest, file_manifest)
- return collection_output
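
A hedged usage sketch of the builder above, as driven by `ansible-galaxy collection build`; the paths are hypothetical and the source directory must contain a galaxy.yml:

# Hypothetical usage; the artifact name comes from the namespace, name and
# version declared in the collection's galaxy.yml.
artifact = build_collection('collections/my_namespace/my_collection', 'dist', False)
print(artifact)  # e.g. dist/my_namespace-my_collection-1.0.0.tar.gz
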
-
-
-def download_collections(collections, output_path, apis, validate_certs, no_deps, allow_pre_release):
- """Download Ansible collections as their tarball from a Galaxy server to the path specified and creates a requirements
- file of the downloaded requirements to be used for an install.
-
- :param collections: The collections to download, should be a list of tuples with (name, requirement, Galaxy Server).
- :param output_path: The path to download the collections to.
- :param apis: A list of GalaxyAPIs to query when searching for a collection.
- :param validate_certs: Whether to validate the certificate if downloading a tarball from a non-Galaxy host.
- :param no_deps: Ignore any collection dependencies and only download the base requirements.
- :param allow_pre_release: Do not ignore pre-release versions when selecting the latest.
- """
- with _tempdir() as b_temp_path:
- display.display("Process install dependency map")
- with _display_progress():
- dep_map = _build_dependency_map(collections, [], b_temp_path, apis, validate_certs, True, True, no_deps,
- allow_pre_release=allow_pre_release)
-
- requirements = []
- display.display("Starting collection download process to '%s'" % output_path)
- with _display_progress():
- for name, requirement in dep_map.items():
- collection_filename = "%s-%s-%s.tar.gz" % (requirement.namespace, requirement.name,
- requirement.latest_version)
- dest_path = os.path.join(output_path, collection_filename)
- requirements.append({'name': collection_filename, 'version': requirement.latest_version})
-
- display.display("Downloading collection '%s' to '%s'" % (name, dest_path))
-
- if requirement.api is None and requirement.b_path and os.path.isfile(requirement.b_path):
- shutil.copy(requirement.b_path, to_bytes(dest_path, errors='surrogate_or_strict'))
- elif requirement.api is None and requirement.b_path:
- temp_path = to_text(b_temp_path, errors='surrogate_or_strict')
- temp_download_path = build_collection(requirement.b_path, temp_path, True)
- shutil.move(to_bytes(temp_download_path, errors='surrogate_or_strict'),
- to_bytes(dest_path, errors='surrogate_or_strict'))
- else:
- b_temp_download_path = requirement.download(b_temp_path)
- shutil.move(b_temp_download_path, to_bytes(dest_path, errors='surrogate_or_strict'))
-
- display.display("%s (%s) was downloaded successfully" % (name, requirement.latest_version))
-
- requirements_path = os.path.join(output_path, 'requirements.yml')
- display.display("Writing requirements.yml file of downloaded collections to '%s'" % requirements_path)
- with open(to_bytes(requirements_path, errors='surrogate_or_strict'), mode='wb') as req_fd:
- req_fd.write(to_bytes(yaml.safe_dump({'collections': requirements}), errors='surrogate_or_strict'))
-
-
-def publish_collection(collection_path, api, wait, timeout):
- """Publish an Ansible collection tarball into an Ansible Galaxy server.
-
- :param collection_path: The path to the collection tarball to publish.
- :param api: A GalaxyAPI to publish the collection to.
- :param wait: Whether to wait until the import process is complete.
- :param timeout: The time in seconds to wait for the import process to finish, 0 is indefinite.
- """
- import_uri = api.publish_collection(collection_path)
-
- if wait:
- # Galaxy returns a url fragment which differs between v2 and v3. The second to last entry is
- # always the task_id, though.
- # v2: {"task": "https://galaxy-dev.ansible.com/api/v2/collection-imports/35573/"}
- # v3: {"task": "/api/automation-hub/v3/imports/collections/838d1308-a8f4-402c-95cb-7823f3806cd8/"}
- task_id = None
- for path_segment in reversed(import_uri.split('/')):
- if path_segment:
- task_id = path_segment
- break
-
- if not task_id:
- raise AnsibleError("Publishing the collection did not return valid task info. Cannot wait for task status. Returned task info: '%s'" % import_uri)
-
- display.display("Collection has been published to the Galaxy server %s %s" % (api.name, api.api_server))
- with _display_progress():
- api.wait_import_task(task_id, timeout)
- display.display("Collection has been successfully published and imported to the Galaxy server %s %s"
- % (api.name, api.api_server))
- else:
- display.display("Collection has been pushed to the Galaxy server %s %s, not waiting until import has "
- "completed due to --no-wait being set. Import task results can be found at %s"
- % (api.name, api.api_server, import_uri))
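
The task-id extraction above takes the last non-empty path segment, so the same loop works for both response shapes shown in the comment. A standalone check, with the URIs copied from that comment:

def task_id_from_uri(import_uri):
    # Walk the path segments from the end and return the first non-empty one.
    for path_segment in reversed(import_uri.split('/')):
        if path_segment:
            return path_segment
    return None

v2_uri = "https://galaxy-dev.ansible.com/api/v2/collection-imports/35573/"
v3_uri = "/api/automation-hub/v3/imports/collections/838d1308-a8f4-402c-95cb-7823f3806cd8/"
assert task_id_from_uri(v2_uri) == "35573"
assert task_id_from_uri(v3_uri) == "838d1308-a8f4-402c-95cb-7823f3806cd8"
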
-
-
-def install_collections(collections, output_path, apis, validate_certs, ignore_errors, no_deps, force, force_deps,
- allow_pre_release=False):
- """Install Ansible collections to the path specified.
-
- :param collections: The collections to install, should be a list of tuples with (name, requirement, Galaxy server).
- :param output_path: The path to install the collections to.
- :param apis: A list of GalaxyAPIs to query when searching for a collection.
- :param validate_certs: Whether to validate the certificates if downloading a tarball.
- :param ignore_errors: Whether to ignore any errors when installing the collection.
- :param no_deps: Ignore any collection dependencies and only install the base requirements.
- :param force: Re-install a collection if it has already been installed.
- :param force_deps: Re-install a collection as well as its dependencies if they have already been installed.
- """
- existing_collections = find_existing_collections(output_path, fallback_metadata=True)
-
- with _tempdir() as b_temp_path:
- display.display("Process install dependency map")
- with _display_progress():
- dependency_map = _build_dependency_map(collections, existing_collections, b_temp_path, apis,
- validate_certs, force, force_deps, no_deps,
- allow_pre_release=allow_pre_release)
-
- display.display("Starting collection install process")
- with _display_progress():
- for collection in dependency_map.values():
- try:
- collection.install(output_path, b_temp_path)
- except AnsibleError as err:
- if ignore_errors:
- display.warning("Failed to install collection %s but skipping due to --ignore-errors being set. "
- "Error: %s" % (to_text(collection), to_text(err)))
- else:
- raise
-
-
-def validate_collection_name(name):
- """Validates the collection name as an input from the user or a requirements file fit the requirements.
-
- :param name: The input name with optional range specifier split by ':'.
- :return: The input value, required for argparse validation.
- """
- collection, dummy, dummy = name.partition(':')
- if AnsibleCollectionRef.is_valid_collection_name(collection):
- return name
-
- raise AnsibleError("Invalid collection name '%s', "
- "name must be in the format <namespace>.<collection>. \n"
- "Please make sure namespace and collection name contains "
- "characters from [a-zA-Z0-9_] only." % name)
-
-
-def validate_collection_path(collection_path):
- """Ensure a given path ends with 'ansible_collections'
-
- :param collection_path: The path that should end in 'ansible_collections'
- :return: collection_path ending in 'ansible_collections' if it does not already.
- """
-
- if os.path.split(collection_path)[1] != 'ansible_collections':
- return os.path.join(collection_path, 'ansible_collections')
-
- return collection_path
-
-
-def verify_collections(collections, search_paths, apis, validate_certs, ignore_errors, allow_pre_release=False):
-
- with _display_progress():
- with _tempdir() as b_temp_path:
- for collection in collections:
- try:
-
- local_collection = None
- b_collection = to_bytes(collection[0], errors='surrogate_or_strict')
-
- if os.path.isfile(b_collection) or urlparse(collection[0]).scheme.lower() in ['http', 'https'] or len(collection[0].split('.')) != 2:
- raise AnsibleError(message="'%s' is not a valid collection name. The format namespace.name is expected." % collection[0])
-
- collection_name = collection[0]
- namespace, name = collection_name.split('.')
- collection_version = collection[1]
-
- # Verify local collection exists before downloading it from a galaxy server
- for search_path in search_paths:
- b_search_path = to_bytes(os.path.join(search_path, namespace, name), errors='surrogate_or_strict')
- if os.path.isdir(b_search_path):
- if not os.path.isfile(os.path.join(to_text(b_search_path, errors='surrogate_or_strict'), 'MANIFEST.json')):
- raise AnsibleError(
- message="Collection %s does not appear to have a MANIFEST.json. " % collection_name +
- "A MANIFEST.json is expected if the collection has been built and installed via ansible-galaxy."
- )
- local_collection = CollectionRequirement.from_path(b_search_path, False)
- break
- if local_collection is None:
- raise AnsibleError(message='Collection %s is not installed in any of the collection paths.' % collection_name)
-
- # Download collection on a galaxy server for comparison
- try:
- remote_collection = CollectionRequirement.from_name(collection_name, apis, collection_version, False, parent=None,
- allow_pre_release=allow_pre_release)
- except AnsibleError as e:
- if e.message == 'Failed to find collection %s:%s' % (collection[0], collection[1]):
- raise AnsibleError('Failed to find remote collection %s:%s on any of the galaxy servers' % (collection[0], collection[1]))
- raise
-
- download_url = remote_collection.metadata.download_url
- headers = {}
- remote_collection.api._add_auth_token(headers, download_url, required=False)
- b_temp_tar_path = _download_file(download_url, b_temp_path, None, validate_certs, headers=headers)
-
- local_collection.verify(remote_collection, search_path, b_temp_tar_path)
-
- except AnsibleError as err:
- if ignore_errors:
- display.warning("Failed to verify collection %s but skipping due to --ignore-errors being set. "
- "Error: %s" % (collection[0], to_text(err)))
- else:
- raise
-
-
-@contextmanager
-def _tempdir():
- b_temp_path = tempfile.mkdtemp(dir=to_bytes(C.DEFAULT_LOCAL_TMP, errors='surrogate_or_strict'))
- yield b_temp_path
- shutil.rmtree(b_temp_path)
-
-
-@contextmanager
-def _tarfile_extract(tar, member):
- tar_obj = tar.extractfile(member)
- yield member, tar_obj
- tar_obj.close()
-
-
-@contextmanager
-def _display_progress():
- config_display = C.GALAXY_DISPLAY_PROGRESS
- display_wheel = sys.stdout.isatty() if config_display is None else config_display
-
- if not display_wheel:
- yield
- return
-
- def progress(display_queue, actual_display):
- actual_display.debug("Starting display_progress display thread")
- t = threading.current_thread()
-
- while True:
- for c in "|/-\\":
- actual_display.display(c + "\b", newline=False)
- time.sleep(0.1)
-
- # Display a message from the main thread
- while True:
- try:
- method, args, kwargs = display_queue.get(block=False, timeout=0.1)
- except queue.Empty:
- break
- else:
- func = getattr(actual_display, method)
- func(*args, **kwargs)
-
- if getattr(t, "finish", False):
- actual_display.debug("Received end signal for display_progress display thread")
- return
-
- class DisplayThread(object):
-
- def __init__(self, display_queue):
- self.display_queue = display_queue
-
- def __getattr__(self, attr):
- def call_display(*args, **kwargs):
- self.display_queue.put((attr, args, kwargs))
-
- return call_display
-
- # Temporarily override the global display class with our own, which adds the calls to a queue for the thread to call.
- global display
- old_display = display
- try:
- display_queue = queue.Queue()
- display = DisplayThread(display_queue)
- t = threading.Thread(target=progress, args=(display_queue, old_display))
- t.daemon = True
- t.start()
-
- try:
- yield
- finally:
- t.finish = True
- t.join()
- except Exception:
- # The exception is re-raised so we can be sure the thread is finished and not using the display anymore
- raise
- finally:
- display = old_display
-
-
-def _get_galaxy_yml(b_galaxy_yml_path):
- meta_info = get_collections_galaxy_meta_info()
-
- mandatory_keys = set()
- string_keys = set()
- list_keys = set()
- dict_keys = set()
-
- for info in meta_info:
- if info.get('required', False):
- mandatory_keys.add(info['key'])
-
- key_list_type = {
- 'str': string_keys,
- 'list': list_keys,
- 'dict': dict_keys,
- }[info.get('type', 'str')]
- key_list_type.add(info['key'])
-
- all_keys = frozenset(list(mandatory_keys) + list(string_keys) + list(list_keys) + list(dict_keys))
-
- try:
- with open(b_galaxy_yml_path, 'rb') as g_yaml:
- galaxy_yml = yaml.safe_load(g_yaml)
- except YAMLError as err:
- raise AnsibleError("Failed to parse the galaxy.yml at '%s' with the following error:\n%s"
- % (to_native(b_galaxy_yml_path), to_native(err)))
-
- set_keys = set(galaxy_yml.keys())
- missing_keys = mandatory_keys.difference(set_keys)
- if missing_keys:
- raise AnsibleError("The collection galaxy.yml at '%s' is missing the following mandatory keys: %s"
- % (to_native(b_galaxy_yml_path), ", ".join(sorted(missing_keys))))
-
- extra_keys = set_keys.difference(all_keys)
- if len(extra_keys) > 0:
- display.warning("Found unknown keys in collection galaxy.yml at '%s': %s"
- % (to_text(b_galaxy_yml_path), ", ".join(extra_keys)))
-
- # Add the defaults if they have not been set
- for optional_string in string_keys:
- if optional_string not in galaxy_yml:
- galaxy_yml[optional_string] = None
-
- for optional_list in list_keys:
- list_val = galaxy_yml.get(optional_list, None)
-
- if list_val is None:
- galaxy_yml[optional_list] = []
- elif not isinstance(list_val, list):
- galaxy_yml[optional_list] = [list_val]
-
- for optional_dict in dict_keys:
- if optional_dict not in galaxy_yml:
- galaxy_yml[optional_dict] = {}
-
- # license is a builtin var in Python, to avoid confusion we just rename it to license_ids
- galaxy_yml['license_ids'] = galaxy_yml['license']
- del galaxy_yml['license']
-
- return galaxy_yml
-
-
-def _build_files_manifest(b_collection_path, namespace, name, ignore_patterns):
- # We always ignore .pyc and .retry files as well as some well known version control directories. The ignore
- # patterns can be extended by the build_ignore key in galaxy.yml
- b_ignore_patterns = [
- b'galaxy.yml',
- b'galaxy.yaml',
- b'.git',
- b'*.pyc',
- b'*.retry',
- b'tests/output', # Ignore ansible-test result output directory.
- to_bytes('{0}-{1}-*.tar.gz'.format(namespace, name)), # Ignores previously built artifacts in the root dir.
- ]
- b_ignore_patterns += [to_bytes(p) for p in ignore_patterns]
- b_ignore_dirs = frozenset([b'CVS', b'.bzr', b'.hg', b'.git', b'.svn', b'__pycache__', b'.tox'])
-
- entry_template = {
- 'name': None,
- 'ftype': None,
- 'chksum_type': None,
- 'chksum_sha256': None,
- 'format': MANIFEST_FORMAT
- }
- manifest = {
- 'files': [
- {
- 'name': '.',
- 'ftype': 'dir',
- 'chksum_type': None,
- 'chksum_sha256': None,
- 'format': MANIFEST_FORMAT,
- },
- ],
- 'format': MANIFEST_FORMAT,
- }
-
- def _walk(b_path, b_top_level_dir):
- for b_item in os.listdir(b_path):
- b_abs_path = os.path.join(b_path, b_item)
- b_rel_base_dir = b'' if b_path == b_top_level_dir else b_path[len(b_top_level_dir) + 1:]
- b_rel_path = os.path.join(b_rel_base_dir, b_item)
- rel_path = to_text(b_rel_path, errors='surrogate_or_strict')
-
- if os.path.isdir(b_abs_path):
- if any(b_item == b_path for b_path in b_ignore_dirs) or \
- any(fnmatch.fnmatch(b_rel_path, b_pattern) for b_pattern in b_ignore_patterns):
- display.vvv("Skipping '%s' for collection build" % to_text(b_abs_path))
- continue
-
- if os.path.islink(b_abs_path):
- b_link_target = os.path.realpath(b_abs_path)
-
- if not _is_child_path(b_link_target, b_top_level_dir):
- display.warning("Skipping '%s' as it is a symbolic link to a directory outside the collection"
- % to_text(b_abs_path))
- continue
-
- manifest_entry = entry_template.copy()
- manifest_entry['name'] = rel_path
- manifest_entry['ftype'] = 'dir'
-
- manifest['files'].append(manifest_entry)
-
- if not os.path.islink(b_abs_path):
- _walk(b_abs_path, b_top_level_dir)
- else:
- if any(fnmatch.fnmatch(b_rel_path, b_pattern) for b_pattern in b_ignore_patterns):
- display.vvv("Skipping '%s' for collection build" % to_text(b_abs_path))
- continue
-
- # Handling of file symlinks occurs in _build_collection_tar; the manifest entry for a symlink is the same as for
- # a normal file.
- manifest_entry = entry_template.copy()
- manifest_entry['name'] = rel_path
- manifest_entry['ftype'] = 'file'
- manifest_entry['chksum_type'] = 'sha256'
- manifest_entry['chksum_sha256'] = secure_hash(b_abs_path, hash_func=sha256)
-
- manifest['files'].append(manifest_entry)
-
- _walk(b_collection_path, b_collection_path)
-
- return manifest
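
The ignore handling above is plain fnmatch on the path relative to the collection root, with the built-in patterns extended by the build_ignore key in galaxy.yml. A simplified illustration using text paths (patterns and paths are hypothetical):

import fnmatch

ignore_patterns = ['galaxy.yml', '*.pyc', 'tests/output', 'my_ns-my_col-*.tar.gz']
fnmatch.fnmatch('plugins/modules/foo.pyc', '*.pyc')        # True  - skipped from FILES.json
fnmatch.fnmatch('tests/output', 'tests/output')            # True  - skipped
fnmatch.fnmatch('roles/web/tasks/main.yml', '*.pyc')       # False - included and hashed
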
-
-
-def _build_manifest(namespace, name, version, authors, readme, tags, description, license_ids, license_file,
- dependencies, repository, documentation, homepage, issues, **kwargs):
-
- manifest = {
- 'collection_info': {
- 'namespace': namespace,
- 'name': name,
- 'version': version,
- 'authors': authors,
- 'readme': readme,
- 'tags': tags,
- 'description': description,
- 'license': license_ids,
- 'license_file': license_file if license_file else None, # Handle galaxy.yml having an empty string (None)
- 'dependencies': dependencies,
- 'repository': repository,
- 'documentation': documentation,
- 'homepage': homepage,
- 'issues': issues,
- },
- 'file_manifest_file': {
- 'name': 'FILES.json',
- 'ftype': 'file',
- 'chksum_type': 'sha256',
- 'chksum_sha256': None, # Filled out in _build_collection_tar
- 'format': MANIFEST_FORMAT
- },
- 'format': MANIFEST_FORMAT,
- }
-
- return manifest
-
-
-def _build_collection_tar(b_collection_path, b_tar_path, collection_manifest, file_manifest):
- """Build a tar.gz collection artifact from the manifest data."""
- files_manifest_json = to_bytes(json.dumps(file_manifest, indent=True), errors='surrogate_or_strict')
- collection_manifest['file_manifest_file']['chksum_sha256'] = secure_hash_s(files_manifest_json, hash_func=sha256)
- collection_manifest_json = to_bytes(json.dumps(collection_manifest, indent=True), errors='surrogate_or_strict')
-
- with _tempdir() as b_temp_path:
- b_tar_filepath = os.path.join(b_temp_path, os.path.basename(b_tar_path))
-
- with tarfile.open(b_tar_filepath, mode='w:gz') as tar_file:
- # Add the MANIFEST.json and FILES.json file to the archive
- for name, b in [('MANIFEST.json', collection_manifest_json), ('FILES.json', files_manifest_json)]:
- b_io = BytesIO(b)
- tar_info = tarfile.TarInfo(name)
- tar_info.size = len(b)
- tar_info.mtime = time.time()
- tar_info.mode = 0o0644
- tar_file.addfile(tarinfo=tar_info, fileobj=b_io)
-
- for file_info in file_manifest['files']:
- if file_info['name'] == '.':
- continue
-
- # arcname expects a native string, cannot be bytes
- filename = to_native(file_info['name'], errors='surrogate_or_strict')
- b_src_path = os.path.join(b_collection_path, to_bytes(filename, errors='surrogate_or_strict'))
-
- def reset_stat(tarinfo):
- if tarinfo.type != tarfile.SYMTYPE:
- existing_is_exec = tarinfo.mode & stat.S_IXUSR
- tarinfo.mode = 0o0755 if existing_is_exec or tarinfo.isdir() else 0o0644
- tarinfo.uid = tarinfo.gid = 0
- tarinfo.uname = tarinfo.gname = ''
-
- return tarinfo
-
- if os.path.islink(b_src_path):
- b_link_target = os.path.realpath(b_src_path)
- if _is_child_path(b_link_target, b_collection_path):
- b_rel_path = os.path.relpath(b_link_target, start=os.path.dirname(b_src_path))
-
- tar_info = tarfile.TarInfo(filename)
- tar_info.type = tarfile.SYMTYPE
- tar_info.linkname = to_native(b_rel_path, errors='surrogate_or_strict')
- tar_info = reset_stat(tar_info)
- tar_file.addfile(tarinfo=tar_info)
-
- continue
-
- # Dealing with a normal file, just add it by name.
- tar_file.add(os.path.realpath(b_src_path), arcname=filename, recursive=False, filter=reset_stat)
-
- shutil.copy(b_tar_filepath, b_tar_path)
- collection_name = "%s.%s" % (collection_manifest['collection_info']['namespace'],
- collection_manifest['collection_info']['name'])
- display.display('Created collection for %s at %s' % (collection_name, to_text(b_tar_path)))
-
-
-def _build_collection_dir(b_collection_path, b_collection_output, collection_manifest, file_manifest):
- """Build a collection directory from the manifest data.
-
- This should follow the same pattern as _build_collection_tar.
- """
- os.makedirs(b_collection_output, mode=0o0755)
-
- files_manifest_json = to_bytes(json.dumps(file_manifest, indent=True), errors='surrogate_or_strict')
- collection_manifest['file_manifest_file']['chksum_sha256'] = secure_hash_s(files_manifest_json, hash_func=sha256)
- collection_manifest_json = to_bytes(json.dumps(collection_manifest, indent=True), errors='surrogate_or_strict')
-
- # Write contents to the files
- for name, b in [('MANIFEST.json', collection_manifest_json), ('FILES.json', files_manifest_json)]:
- b_path = os.path.join(b_collection_output, to_bytes(name, errors='surrogate_or_strict'))
- with open(b_path, 'wb') as file_obj, BytesIO(b) as b_io:
- shutil.copyfileobj(b_io, file_obj)
-
- os.chmod(b_path, 0o0644)
-
- base_directories = []
- for file_info in file_manifest['files']:
- if file_info['name'] == '.':
- continue
-
- src_file = os.path.join(b_collection_path, to_bytes(file_info['name'], errors='surrogate_or_strict'))
- dest_file = os.path.join(b_collection_output, to_bytes(file_info['name'], errors='surrogate_or_strict'))
-
- if any(src_file.startswith(directory) for directory in base_directories):
- continue
-
- existing_is_exec = os.stat(src_file).st_mode & stat.S_IXUSR
- mode = 0o0755 if existing_is_exec else 0o0644
-
- if os.path.isdir(src_file):
- mode = 0o0755
- base_directories.append(src_file)
- shutil.copytree(src_file, dest_file)
- else:
- shutil.copyfile(src_file, dest_file)
-
- os.chmod(dest_file, mode)
-
-
-def find_existing_collections(path, fallback_metadata=False):
- collections = []
-
- b_path = to_bytes(path, errors='surrogate_or_strict')
- for b_namespace in os.listdir(b_path):
- b_namespace_path = os.path.join(b_path, b_namespace)
- if os.path.isfile(b_namespace_path):
- continue
-
- for b_collection in os.listdir(b_namespace_path):
- b_collection_path = os.path.join(b_namespace_path, b_collection)
- if os.path.isdir(b_collection_path):
- req = CollectionRequirement.from_path(b_collection_path, False, fallback_metadata=fallback_metadata)
- display.vvv("Found installed collection %s:%s at '%s'" % (to_text(req), req.latest_version,
- to_text(b_collection_path)))
- collections.append(req)
-
- return collections
-
-
-def _build_dependency_map(collections, existing_collections, b_temp_path, apis, validate_certs, force, force_deps,
- no_deps, allow_pre_release=False):
- dependency_map = {}
-
- # First build the dependency map on the actual requirements
- for name, version, source, req_type in collections:
- _get_collection_info(dependency_map, existing_collections, name, version, source, b_temp_path, apis,
- validate_certs, (force or force_deps), allow_pre_release=allow_pre_release, req_type=req_type)
-
- checked_parents = set([to_text(c) for c in dependency_map.values() if c.skip])
- while len(dependency_map) != len(checked_parents):
- while not no_deps: # Only parse dependencies if no_deps was not set
- parents_to_check = set(dependency_map.keys()).difference(checked_parents)
-
- deps_exhausted = True
- for parent in parents_to_check:
- parent_info = dependency_map[parent]
-
- if parent_info.dependencies:
- deps_exhausted = False
- for dep_name, dep_requirement in parent_info.dependencies.items():
- _get_collection_info(dependency_map, existing_collections, dep_name, dep_requirement,
- None, b_temp_path, apis, validate_certs, force_deps,
- parent=parent, allow_pre_release=allow_pre_release)
-
- checked_parents.add(parent)
-
- # No extra dependencies were resolved, exit loop
- if deps_exhausted:
- break
-
- # Now we have resolved the deps to our best extent, now select the latest version for collections with
- # multiple versions found and go from there
- deps_not_checked = set(dependency_map.keys()).difference(checked_parents)
- for collection in deps_not_checked:
- dependency_map[collection].set_latest_version()
- if no_deps or len(dependency_map[collection].dependencies) == 0:
- checked_parents.add(collection)
-
- return dependency_map
-
-
-def _collections_from_scm(collection, requirement, b_temp_path, force, parent=None):
- """Returns a list of collections found in the repo. If there is a galaxy.yml in the collection then just return
- the specific collection. Otherwise, check each top-level directory for a galaxy.yml.
-
- :param collection: URI to a git repo
- :param requirement: The version of the artifact
- :param b_temp_path: The temporary path to the archive of a collection
- :param force: Whether to overwrite an existing collection or fail
- :param parent: The name of the parent collection
- :raises AnsibleError: if nothing found
- :return: List of CollectionRequirement objects
- :rtype: list
- """
-
- reqs = []
- name, version, path, fragment = parse_scm(collection, requirement)
- b_repo_root = to_bytes(name, errors='surrogate_or_strict')
-
- b_collection_path = os.path.join(b_temp_path, b_repo_root)
- if fragment:
- b_fragment = to_bytes(fragment, errors='surrogate_or_strict')
- b_collection_path = os.path.join(b_collection_path, b_fragment)
-
- b_galaxy_path = get_galaxy_metadata_path(b_collection_path)
-
- err = ("%s appears to be an SCM collection source, but the required galaxy.yml was not found. "
- "Append #path/to/collection/ to your URI (before the comma separated version, if one is specified) "
- "to point to a directory containing the galaxy.yml or directories of collections" % collection)
-
- display.vvvvv("Considering %s as a possible path to a collection's galaxy.yml" % b_galaxy_path)
- if os.path.exists(b_galaxy_path):
- return [CollectionRequirement.from_path(b_collection_path, force, parent, fallback_metadata=True, skip=False)]
-
- if not os.path.isdir(b_collection_path) or not os.listdir(b_collection_path):
- raise AnsibleError(err)
-
- for b_possible_collection in os.listdir(b_collection_path):
- b_collection = os.path.join(b_collection_path, b_possible_collection)
- if not os.path.isdir(b_collection):
- continue
- b_galaxy = get_galaxy_metadata_path(b_collection)
- display.vvvvv("Considering %s as a possible path to a collection's galaxy.yml" % b_galaxy)
- if os.path.exists(b_galaxy):
- reqs.append(CollectionRequirement.from_path(b_collection, force, parent, fallback_metadata=True, skip=False))
- if not reqs:
- raise AnsibleError(err)
-
- return reqs
-
-
-def _get_collection_info(dep_map, existing_collections, collection, requirement, source, b_temp_path, apis,
- validate_certs, force, parent=None, allow_pre_release=False, req_type=None):
- dep_msg = ""
- if parent:
- dep_msg = " - as dependency of %s" % parent
- display.vvv("Processing requirement collection '%s'%s" % (to_text(collection), dep_msg))
-
- b_tar_path = None
-
- is_file = (
- req_type == 'file' or
- (not req_type and os.path.isfile(to_bytes(collection, errors='surrogate_or_strict')))
- )
-
- is_url = (
- req_type == 'url' or
- (not req_type and urlparse(collection).scheme.lower() in ['http', 'https'])
- )
-
- is_scm = (
- req_type == 'git' or
- (not req_type and not b_tar_path and collection.startswith(('git+', 'git@')))
- )
-
- if is_file:
- display.vvvv("Collection requirement '%s' is a tar artifact" % to_text(collection))
- b_tar_path = to_bytes(collection, errors='surrogate_or_strict')
- elif is_url:
- display.vvvv("Collection requirement '%s' is a URL to a tar artifact" % collection)
- try:
- b_tar_path = _download_file(collection, b_temp_path, None, validate_certs)
- except urllib_error.URLError as err:
- raise AnsibleError("Failed to download collection tar from '%s': %s"
- % (to_native(collection), to_native(err)))
-
- if is_scm:
- if not collection.startswith('git'):
- collection = 'git+' + collection
-
- name, version, path, fragment = parse_scm(collection, requirement)
- b_tar_path = scm_archive_collection(path, name=name, version=version)
-
- with tarfile.open(b_tar_path, mode='r') as collection_tar:
- collection_tar.extractall(path=to_text(b_temp_path))
-
- # Ignore requirement if it is set (it must follow semantic versioning, unlike a git version, which is any tree-ish)
- # If the requirement was the only place version was set, requirement == version at this point
- if requirement not in {"*", ""} and requirement != version:
- display.warning(
- "The collection {0} appears to be a git repository and two versions were provided: '{1}', and '{2}'. "
- "The version {2} is being disregarded.".format(collection, version, requirement)
- )
- requirement = "*"
-
- reqs = _collections_from_scm(collection, requirement, b_temp_path, force, parent)
- for req in reqs:
- collection_info = get_collection_info_from_req(dep_map, req)
- update_dep_map_collection_info(dep_map, existing_collections, collection_info, parent, requirement)
- else:
- if b_tar_path:
- req = CollectionRequirement.from_tar(b_tar_path, force, parent=parent)
- collection_info = get_collection_info_from_req(dep_map, req)
- else:
- validate_collection_name(collection)
-
- display.vvvv("Collection requirement '%s' is the name of a collection" % collection)
- if collection in dep_map:
- collection_info = dep_map[collection]
- collection_info.add_requirement(parent, requirement)
- else:
- apis = [source] if source else apis
- collection_info = CollectionRequirement.from_name(collection, apis, requirement, force, parent=parent,
- allow_pre_release=allow_pre_release)
-
- update_dep_map_collection_info(dep_map, existing_collections, collection_info, parent, requirement)
-
-
-def get_collection_info_from_req(dep_map, collection):
- collection_name = to_text(collection)
- if collection_name in dep_map:
- collection_info = dep_map[collection_name]
- collection_info.add_requirement(None, collection.latest_version)
- else:
- collection_info = collection
- return collection_info
-
-
-def update_dep_map_collection_info(dep_map, existing_collections, collection_info, parent, requirement):
- existing = [c for c in existing_collections if to_text(c) == to_text(collection_info)]
- if existing and not collection_info.force:
- # Test that the installed collection fits the requirement
- existing[0].add_requirement(parent, requirement)
- collection_info = existing[0]
-
- dep_map[to_text(collection_info)] = collection_info
-
-
-def parse_scm(collection, version):
- if ',' in collection:
- collection, version = collection.split(',', 1)
- elif version == '*' or not version:
- version = 'HEAD'
-
- if collection.startswith('git+'):
- path = collection[4:]
- else:
- path = collection
-
- path, fragment = urldefrag(path)
- fragment = fragment.strip(os.path.sep)
-
- if path.endswith(os.path.sep + '.git'):
- name = path.split(os.path.sep)[-2]
- elif '://' not in path and '@' not in path:
- name = path
- else:
- name = path.split('/')[-1]
- if name.endswith('.git'):
- name = name[:-4]
-
- return name, version, path, fragment
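
Two worked examples of the SCM URI parsing above (the repository URLs are hypothetical); the returned tuple is (name, version, path, fragment):

parse_scm('git+https://github.com/org/repo.git#collections/my_col,1.2.3', '*')
# -> ('repo', '1.2.3', 'https://github.com/org/repo.git', 'collections/my_col')
parse_scm('git@github.com:org/repo.git', 'devel')
# -> ('repo', 'devel', 'git@github.com:org/repo.git', '')
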
-
-
-def _download_file(url, b_path, expected_hash, validate_certs, headers=None):
- urlsplit = os.path.splitext(to_text(url.rsplit('/', 1)[1]))
- b_file_name = to_bytes(urlsplit[0], errors='surrogate_or_strict')
- b_file_ext = to_bytes(urlsplit[1], errors='surrogate_or_strict')
- b_file_path = tempfile.NamedTemporaryFile(dir=b_path, prefix=b_file_name, suffix=b_file_ext, delete=False).name
-
- display.display("Downloading %s to %s" % (url, to_text(b_path)))
- # Galaxy redirects downloads to S3, which rejects the request if an Authorization header is attached, so don't forward that header on redirects
- resp = open_url(to_native(url, errors='surrogate_or_strict'), validate_certs=validate_certs, headers=headers,
- unredirected_headers=['Authorization'], http_agent=user_agent())
-
- with open(b_file_path, 'wb') as download_file:
- actual_hash = _consume_file(resp, download_file)
-
- if expected_hash:
- display.vvvv("Validating downloaded file hash %s with expected hash %s" % (actual_hash, expected_hash))
- if expected_hash != actual_hash:
- raise AnsibleError("Mismatch artifact hash with downloaded file")
-
- return b_file_path
-
-
-def _extract_tar_dir(tar, dirname, b_dest):
- """ Extracts a directory from a collection tar. """
- member_names = [to_native(dirname, errors='surrogate_or_strict')]
-
- # Create list of members with and without trailing separator
- if not member_names[-1].endswith(os.path.sep):
- member_names.append(member_names[-1] + os.path.sep)
-
- # Try all of the member names and stop on the first one that we are able to successfully get
- for member in member_names:
- try:
- tar_member = tar.getmember(member)
- except KeyError:
- continue
- break
- else:
- # If we still can't find the member, raise a nice error.
- raise AnsibleError("Unable to extract '%s' from collection" % to_native(member, errors='surrogate_or_strict'))
-
- b_dir_path = os.path.join(b_dest, to_bytes(dirname, errors='surrogate_or_strict'))
-
- b_parent_path = os.path.dirname(b_dir_path)
- try:
- os.makedirs(b_parent_path, mode=0o0755)
- except OSError as e:
- if e.errno != errno.EEXIST:
- raise
-
- if tar_member.type == tarfile.SYMTYPE:
- b_link_path = to_bytes(tar_member.linkname, errors='surrogate_or_strict')
- if not _is_child_path(b_link_path, b_dest, link_name=b_dir_path):
- raise AnsibleError("Cannot extract symlink '%s' in collection: path points to location outside of "
- "collection '%s'" % (to_native(dirname), b_link_path))
-
- os.symlink(b_link_path, b_dir_path)
-
- else:
- if not os.path.isdir(b_dir_path):
- os.mkdir(b_dir_path, 0o0755)
-
-
-def _extract_tar_file(tar, filename, b_dest, b_temp_path, expected_hash=None):
- """ Extracts a file from a collection tar. """
- with _get_tar_file_member(tar, filename) as (tar_member, tar_obj):
- if tar_member.type == tarfile.SYMTYPE:
- actual_hash = _consume_file(tar_obj)
-
- else:
- with tempfile.NamedTemporaryFile(dir=b_temp_path, delete=False) as tmpfile_obj:
- actual_hash = _consume_file(tar_obj, tmpfile_obj)
-
- if expected_hash and actual_hash != expected_hash:
- raise AnsibleError("Checksum mismatch for '%s' inside collection at '%s'"
- % (to_native(filename, errors='surrogate_or_strict'), to_native(tar.name)))
-
- b_dest_filepath = os.path.abspath(os.path.join(b_dest, to_bytes(filename, errors='surrogate_or_strict')))
- b_parent_dir = os.path.dirname(b_dest_filepath)
- if not _is_child_path(b_parent_dir, b_dest):
- raise AnsibleError("Cannot extract tar entry '%s' as it will be placed outside the collection directory"
- % to_native(filename, errors='surrogate_or_strict'))
-
- if not os.path.exists(b_parent_dir):
- # Seems like Galaxy does not validate if all file entries have a corresponding dir ftype entry. This check
- # makes sure we create the parent directory even if it wasn't set in the metadata.
- os.makedirs(b_parent_dir, mode=0o0755)
-
- if tar_member.type == tarfile.SYMTYPE:
- b_link_path = to_bytes(tar_member.linkname, errors='surrogate_or_strict')
- if not _is_child_path(b_link_path, b_dest, link_name=b_dest_filepath):
- raise AnsibleError("Cannot extract symlink '%s' in collection: path points to location outside of "
- "collection '%s'" % (to_native(filename), b_link_path))
-
- os.symlink(b_link_path, b_dest_filepath)
-
- else:
- shutil.move(to_bytes(tmpfile_obj.name, errors='surrogate_or_strict'), b_dest_filepath)
-
- # Default to rw-r--r-- and only add execute if the tar file has execute.
- tar_member = tar.getmember(to_native(filename, errors='surrogate_or_strict'))
- new_mode = 0o644
- if stat.S_IMODE(tar_member.mode) & stat.S_IXUSR:
- new_mode |= 0o0111
-
- os.chmod(b_dest_filepath, new_mode)
-
-
-def _get_tar_file_member(tar, filename):
- n_filename = to_native(filename, errors='surrogate_or_strict')
- try:
- member = tar.getmember(n_filename)
- except KeyError:
- raise AnsibleError("Collection tar at '%s' does not contain the expected file '%s'." % (
- to_native(tar.name),
- n_filename))
-
- return _tarfile_extract(tar, member)
-
-
-def _get_json_from_tar_file(b_path, filename):
- file_contents = ''
-
- with tarfile.open(b_path, mode='r') as collection_tar:
- with _get_tar_file_member(collection_tar, filename) as (dummy, tar_obj):
- bufsize = 65536
- data = tar_obj.read(bufsize)
- while data:
- file_contents += to_text(data)
- data = tar_obj.read(bufsize)
-
- return json.loads(file_contents)
-
-
-def _get_tar_file_hash(b_path, filename):
- with tarfile.open(b_path, mode='r') as collection_tar:
- with _get_tar_file_member(collection_tar, filename) as (dummy, tar_obj):
- return _consume_file(tar_obj)
-
-
-def _is_child_path(path, parent_path, link_name=None):
- """ Checks that path is a path within the parent_path specified. """
- b_path = to_bytes(path, errors='surrogate_or_strict')
-
- if link_name and not os.path.isabs(b_path):
- # If link_name is specified, path is the source of the link and we need to resolve the absolute path.
- b_link_dir = os.path.dirname(to_bytes(link_name, errors='surrogate_or_strict'))
- b_path = os.path.abspath(os.path.join(b_link_dir, b_path))
-
- b_parent_path = to_bytes(parent_path, errors='surrogate_or_strict')
- return b_path == b_parent_path or b_path.startswith(b_parent_path + to_bytes(os.path.sep))
-
-
-def _consume_file(read_from, write_to=None):
- bufsize = 65536
- sha256_digest = sha256()
- data = read_from.read(bufsize)
- while data:
- if write_to is not None:
- write_to.write(data)
- write_to.flush()
- sha256_digest.update(data)
- data = read_from.read(bufsize)
-
- return sha256_digest.hexdigest()
-
-
-def get_galaxy_metadata_path(b_path):
- return os.path.join(b_path, b'galaxy.yml')
diff --git a/lib/ansible/galaxy/collection/__init__.py b/lib/ansible/galaxy/collection/__init__.py
new file mode 100644
index 00000000..85a91bdc
--- /dev/null
+++ b/lib/ansible/galaxy/collection/__init__.py
@@ -0,0 +1,1382 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2019-2021, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""Installed collections management package."""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import errno
+import fnmatch
+import functools
+import json
+import os
+import shutil
+import stat
+import sys
+import tarfile
+import tempfile
+import threading
+import time
+import yaml
+
+from collections import namedtuple
+from contextlib import contextmanager
+from distutils.version import LooseVersion
+from hashlib import sha256
+from io import BytesIO
+from itertools import chain
+from yaml.error import YAMLError
+
+# NOTE: Adding type ignores is a hack for mypy to shut up wrt bug #1153
+try:
+ import queue # type: ignore[import]
+except ImportError: # Python 2
+ import Queue as queue # type: ignore[import,no-redef]
+
+try:
+ # NOTE: It's in Python 3 stdlib and can be installed on Python 2
+ # NOTE: via `pip install typing`. Unnecessary in runtime.
+ # NOTE: `TYPE_CHECKING` is True during mypy-typecheck-time.
+ from typing import TYPE_CHECKING
+except ImportError:
+ TYPE_CHECKING = False
+
+if TYPE_CHECKING:
+ from typing import Dict, Iterable, List, Optional, Text, Union
+ if sys.version_info[:2] >= (3, 8):
+ from typing import Literal
+ else: # Python 2 + Python 3.4-3.7
+ from typing_extensions import Literal
+
+ from ansible.galaxy.api import GalaxyAPI
+ from ansible.galaxy.collection.concrete_artifact_manager import (
+ ConcreteArtifactsManager,
+ )
+
+ ManifestKeysType = Literal[
+ 'collection_info', 'file_manifest_file', 'format',
+ ]
+ FileMetaKeysType = Literal[
+ 'name',
+ 'ftype',
+ 'chksum_type',
+ 'chksum_sha256',
+ 'format',
+ ]
+ CollectionInfoKeysType = Literal[
+ # collection meta:
+ 'namespace', 'name', 'version',
+ 'authors', 'readme',
+ 'tags', 'description',
+ 'license', 'license_file',
+ 'dependencies',
+ 'repository', 'documentation',
+ 'homepage', 'issues',
+
+ # files meta:
+ FileMetaKeysType,
+ ]
+ ManifestValueType = Dict[
+ CollectionInfoKeysType,
+ Optional[
+ Union[
+ int, str, # scalars, like name/ns, schema version
+ List[str], # lists of scalars, like tags
+ Dict[str, str], # deps map
+ ],
+ ],
+ ]
+ CollectionManifestType = Dict[ManifestKeysType, ManifestValueType]
+ FileManifestEntryType = Dict[FileMetaKeysType, Optional[Union[str, int]]]
+ FilesManifestType = Dict[
+ Literal['files', 'format'],
+ Union[List[FileManifestEntryType], int],
+ ]
+
+import ansible.constants as C
+from ansible.errors import AnsibleError
+from ansible.galaxy import get_collections_galaxy_meta_info
+from ansible.galaxy.collection.concrete_artifact_manager import (
+ _consume_file,
+ _download_file,
+ _get_json_from_installed_dir,
+ _get_meta_from_src_dir,
+ _tarfile_extract,
+)
+from ansible.galaxy.collection.galaxy_api_proxy import MultiGalaxyAPIProxy
+from ansible.galaxy.dependency_resolution import (
+ build_collection_dependency_resolver,
+)
+from ansible.galaxy.dependency_resolution.dataclasses import (
+ Candidate, Requirement, _is_installed_collection_dir,
+)
+from ansible.galaxy.dependency_resolution.errors import (
+ CollectionDependencyResolutionImpossible,
+ CollectionDependencyInconsistentCandidate,
+)
+from ansible.galaxy.dependency_resolution.versioning import meets_requirements
+from ansible.module_utils.six import raise_from
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.utils.collection_loader import AnsibleCollectionRef
+from ansible.utils.display import Display
+from ansible.utils.hashing import secure_hash, secure_hash_s
+from ansible.utils.version import SemanticVersion
+
+
+display = Display()
+
+MANIFEST_FORMAT = 1
+MANIFEST_FILENAME = 'MANIFEST.json'
+
+ModifiedContent = namedtuple('ModifiedContent', ['filename', 'expected', 'installed'])
+
+
+# FUTURE: expose actual verify result details for a collection on this object, maybe reimplement as dataclass on py3.8+
+class CollectionVerifyResult:
+ def __init__(self, collection_name): # type: (str) -> None
+ self.collection_name = collection_name # type: str
+ self.success = True # type: bool
+
+
+def verify_local_collection(
+ local_collection, remote_collection,
+ artifacts_manager,
+): # type: (Candidate, Optional[Candidate], ConcreteArtifactsManager) -> CollectionVerifyResult
+ """Verify integrity of the locally installed collection.
+
+ :param local_collection: Collection being checked.
+ :param remote_collection: Upstream collection (optional, if None, only verify local artifact)
+ :param artifacts_manager: Artifacts manager.
+ :return: a collection verify result object.
+ """
+ result = CollectionVerifyResult(local_collection.fqcn)
+
+ b_collection_path = to_bytes(
+ local_collection.src, errors='surrogate_or_strict',
+ )
+
+ display.display("Verifying '{coll!s}'.".format(coll=local_collection))
+ display.display(
+ u"Installed collection found at '{path!s}'".
+ format(path=to_text(local_collection.src)),
+ )
+
+ modified_content = [] # type: List[ModifiedContent]
+
+ verify_local_only = remote_collection is None
+ if verify_local_only:
+ # partial away the local FS detail so we can just ask generically during validation
+ get_json_from_validation_source = functools.partial(_get_json_from_installed_dir, b_collection_path)
+ get_hash_from_validation_source = functools.partial(_get_file_hash, b_collection_path)
+
+ # since we're not downloading this, just seed it with the value from disk
+ manifest_hash = get_hash_from_validation_source(MANIFEST_FILENAME)
+ else:
+ # fetch remote
+ b_temp_tar_path = ( # NOTE: AnsibleError is raised on URLError
+ artifacts_manager.get_artifact_path
+ if remote_collection.is_concrete_artifact
+ else artifacts_manager.get_galaxy_artifact_path
+ )(remote_collection)
+
+ display.vvv(
+ u"Remote collection cached as '{path!s}'".format(path=to_text(b_temp_tar_path))
+ )
+
+ # partial away the tarball details so we can just ask generically during validation
+ get_json_from_validation_source = functools.partial(_get_json_from_tar_file, b_temp_tar_path)
+ get_hash_from_validation_source = functools.partial(_get_tar_file_hash, b_temp_tar_path)
+
+ # Compare installed version versus requirement version
+ if local_collection.ver != remote_collection.ver:
+ err = (
+ "{local_fqcn!s} has the version '{local_ver!s}' but "
+ "is being compared to '{remote_ver!s}'".format(
+ local_fqcn=local_collection.fqcn,
+ local_ver=local_collection.ver,
+ remote_ver=remote_collection.ver,
+ )
+ )
+ display.display(err)
+ result.success = False
+ return result
+
+ # Verify the downloaded manifest hash matches the installed copy before verifying the file manifest
+ manifest_hash = get_hash_from_validation_source(MANIFEST_FILENAME)
+ _verify_file_hash(b_collection_path, MANIFEST_FILENAME, manifest_hash, modified_content)
+
+ display.display('MANIFEST.json hash: {manifest_hash}'.format(manifest_hash=manifest_hash))
+
+ manifest = get_json_from_validation_source(MANIFEST_FILENAME)
+
+ # Use the manifest to verify the file manifest checksum
+ file_manifest_data = manifest['file_manifest_file']
+ file_manifest_filename = file_manifest_data['name']
+ expected_hash = file_manifest_data['chksum_%s' % file_manifest_data['chksum_type']]
+
+ # Verify the file manifest before using it to verify individual files
+ _verify_file_hash(b_collection_path, file_manifest_filename, expected_hash, modified_content)
+ file_manifest = get_json_from_validation_source(file_manifest_filename)
+
+ # Use the file manifest to verify individual file checksums
+ for manifest_data in file_manifest['files']:
+ if manifest_data['ftype'] == 'file':
+ expected_hash = manifest_data['chksum_%s' % manifest_data['chksum_type']]
+ _verify_file_hash(b_collection_path, manifest_data['name'], expected_hash, modified_content)
+
+ if modified_content:
+ result.success = False
+ display.display(
+ 'Collection {fqcn!s} contains modified content '
+ 'in the following files:'.
+ format(fqcn=to_text(local_collection.fqcn)),
+ )
+ for content_change in modified_content:
+ display.display(' %s' % content_change.filename)
+ display.v(" Expected: %s\n Found: %s" % (content_change.expected, content_change.installed))
+ else:
+ what = "are internally consistent with its manifest" if verify_local_only else "match the remote collection"
+ display.display(
+ "Successfully verified that checksums for '{coll!s}' {what!s}.".
+ format(coll=local_collection, what=what),
+ )
+
+ return result
+
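verify_collections() below is the usual entry point, but the helper above can also be exercised directly. A minimal sketch of a local-only check, assuming local_candidate is a Candidate built from an installed collection directory and artifacts_manager is an existing ConcreteArtifactsManager:

result = verify_local_collection(local_candidate, None, artifacts_manager)  # remote_collection=None -> local-only
print(result.collection_name, result.success)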
+
+def build_collection(u_collection_path, u_output_path, force):
+ # type: (Text, Text, bool) -> Text
+ """Creates the Ansible collection artifact in a .tar.gz file.
+
+ :param u_collection_path: The path to the collection to build. This should be the directory that contains the
+ galaxy.yml file.
+ :param u_output_path: The path to create the collection build artifact. This should be a directory.
+ :param force: Whether to overwrite an existing collection build artifact or fail.
+ :return: The path to the collection build artifact.
+ """
+ b_collection_path = to_bytes(u_collection_path, errors='surrogate_or_strict')
+ try:
+ collection_meta = _get_meta_from_src_dir(b_collection_path)
+ except LookupError as lookup_err:
+ raise_from(AnsibleError(to_native(lookup_err)), lookup_err)
+
+ collection_manifest = _build_manifest(**collection_meta)
+ file_manifest = _build_files_manifest(
+ b_collection_path,
+ collection_meta['namespace'], # type: ignore[arg-type]
+ collection_meta['name'], # type: ignore[arg-type]
+ collection_meta['build_ignore'], # type: ignore[arg-type]
+ )
+
+ artifact_tarball_file_name = '{ns!s}-{name!s}-{ver!s}.tar.gz'.format(
+ name=collection_meta['name'],
+ ns=collection_meta['namespace'],
+ ver=collection_meta['version'],
+ )
+ b_collection_output = os.path.join(
+ to_bytes(u_output_path),
+ to_bytes(artifact_tarball_file_name, errors='surrogate_or_strict'),
+ )
+
+ if os.path.exists(b_collection_output):
+ if os.path.isdir(b_collection_output):
+ raise AnsibleError("The output collection artifact '%s' already exists, "
+ "but is a directory - aborting" % to_native(b_collection_output))
+ elif not force:
+ raise AnsibleError("The file '%s' already exists. You can use --force to re-create "
+ "the collection artifact." % to_native(b_collection_output))
+
+ collection_output = _build_collection_tar(b_collection_path, b_collection_output, collection_manifest, file_manifest)
+ return collection_output
+
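A minimal usage sketch of build_collection() as defined above; the paths are illustrative and assume a checkout containing a galaxy.yml plus an existing output directory:

from ansible.galaxy.collection import build_collection

# Hypothetical paths, shown only to illustrate the call shape.
artifact_path = build_collection(u'/src/acme/demo', u'/src/acme/dist', force=False)
print(artifact_path)  # e.g. /src/acme/dist/acme-demo-1.0.0.tar.gz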
+
+def download_collections(
+ collections, # type: Iterable[Requirement]
+ output_path, # type: str
+ apis, # type: Iterable[GalaxyAPI]
+ no_deps, # type: bool
+ allow_pre_release, # type: bool
+ artifacts_manager, # type: ConcreteArtifactsManager
+): # type: (...) -> None
+ """Download Ansible collections as their tarball from a Galaxy server to the path specified and creates a requirements
+ file of the downloaded requirements to be used for an install.
+
+    :param collections: The collections to download, given as Requirement objects.
+ :param output_path: The path to download the collections to.
+ :param apis: A list of GalaxyAPIs to query when search for a collection.
+    :param artifacts_manager: Artifacts manager.
+ :param no_deps: Ignore any collection dependencies and only download the base requirements.
+ :param allow_pre_release: Do not ignore pre-release versions when selecting the latest.
+ """
+ with _display_progress("Process download dependency map"):
+ dep_map = _resolve_depenency_map(
+ set(collections),
+ galaxy_apis=apis,
+ preferred_candidates=None,
+ concrete_artifacts_manager=artifacts_manager,
+ no_deps=no_deps,
+ allow_pre_release=allow_pre_release,
+ upgrade=False,
+ )
+
+ b_output_path = to_bytes(output_path, errors='surrogate_or_strict')
+
+ requirements = []
+ with _display_progress(
+ "Starting collection download process to '{path!s}'".
+ format(path=output_path),
+ ):
+ for fqcn, concrete_coll_pin in dep_map.copy().items(): # FIXME: move into the provider
+ if concrete_coll_pin.is_virtual:
+ display.display(
+ 'Virtual collection {coll!s} is not downloadable'.
+ format(coll=to_text(concrete_coll_pin)),
+ )
+ continue
+
+ display.display(
+ u"Downloading collection '{coll!s}' to '{path!s}'".
+ format(coll=to_text(concrete_coll_pin), path=to_text(b_output_path)),
+ )
+
+ b_src_path = (
+ artifacts_manager.get_artifact_path
+ if concrete_coll_pin.is_concrete_artifact
+ else artifacts_manager.get_galaxy_artifact_path
+ )(concrete_coll_pin)
+
+ b_dest_path = os.path.join(
+ b_output_path,
+ os.path.basename(b_src_path),
+ )
+
+ if concrete_coll_pin.is_dir:
+ b_dest_path = to_bytes(
+ build_collection(
+ to_text(b_src_path, errors='surrogate_or_strict'),
+ to_text(output_path, errors='surrogate_or_strict'),
+ force=True,
+ ),
+ errors='surrogate_or_strict',
+ )
+ else:
+ shutil.copy(to_native(b_src_path), to_native(b_dest_path))
+
+ display.display(
+ "Collection '{coll!s}' was downloaded successfully".
+ format(coll=concrete_coll_pin),
+ )
+ requirements.append({
+ # FIXME: Consider using a more specific upgraded format
+ # FIXME: having FQCN in the name field, with src field
+ # FIXME: pointing to the file path, and explicitly set
+ # FIXME: type. If version and name are set, it'd
+ # FIXME: perform validation against the actual metadata
+ # FIXME: in the artifact src points at.
+ 'name': to_native(os.path.basename(b_dest_path)),
+ 'version': concrete_coll_pin.ver,
+ })
+
+ requirements_path = os.path.join(output_path, 'requirements.yml')
+ b_requirements_path = to_bytes(
+ requirements_path, errors='surrogate_or_strict',
+ )
+ display.display(
+ u'Writing requirements.yml file of downloaded collections '
+ "to '{path!s}'".format(path=to_text(requirements_path)),
+ )
+ yaml_bytes = to_bytes(
+ yaml.safe_dump({'collections': requirements}),
+ errors='surrogate_or_strict',
+ )
+ with open(b_requirements_path, mode='wb') as req_fd:
+ req_fd.write(yaml_bytes)
+
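For orientation, the requirements.yml written above lists each downloaded tarball by its file name together with the pinned version. A standalone sketch of the same yaml.safe_dump() call with made-up entries:

import yaml

print(yaml.safe_dump({'collections': [
    {'name': 'acme-demo-1.0.0.tar.gz', 'version': '1.0.0'},  # illustrative values only
]}))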
+
+def publish_collection(collection_path, api, wait, timeout):
+ """Publish an Ansible collection tarball into an Ansible Galaxy server.
+
+ :param collection_path: The path to the collection tarball to publish.
+ :param api: A GalaxyAPI to publish the collection to.
+ :param wait: Whether to wait until the import process is complete.
+ :param timeout: The time in seconds to wait for the import process to finish, 0 is indefinite.
+ """
+ import_uri = api.publish_collection(collection_path)
+
+ if wait:
+ # Galaxy returns a url fragment which differs between v2 and v3. The second to last entry is
+ # always the task_id, though.
+ # v2: {"task": "https://galaxy-dev.ansible.com/api/v2/collection-imports/35573/"}
+ # v3: {"task": "/api/automation-hub/v3/imports/collections/838d1308-a8f4-402c-95cb-7823f3806cd8/"}
+ task_id = None
+ for path_segment in reversed(import_uri.split('/')):
+ if path_segment:
+ task_id = path_segment
+ break
+
+ if not task_id:
+ raise AnsibleError("Publishing the collection did not return valid task info. Cannot wait for task status. Returned task info: '%s'" % import_uri)
+
+ with _display_progress(
+ "Collection has been published to the Galaxy server "
+ "{api.name!s} {api.api_server!s}".format(api=api),
+ ):
+ api.wait_import_task(task_id, timeout)
+ display.display("Collection has been successfully published and imported to the Galaxy server %s %s"
+ % (api.name, api.api_server))
+ else:
+ display.display("Collection has been pushed to the Galaxy server %s %s, not waiting until import has "
+ "completed due to --no-wait being set. Import task results can be found at %s"
+ % (api.name, api.api_server, import_uri))
+
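The task-id extraction described in the comment above amounts to taking the last non-empty path segment of the returned URI. A standalone sketch using the two example URIs from that comment:

for uri in (
    'https://galaxy-dev.ansible.com/api/v2/collection-imports/35573/',
    '/api/automation-hub/v3/imports/collections/838d1308-a8f4-402c-95cb-7823f3806cd8/',
):
    task_id = next(segment for segment in reversed(uri.split('/')) if segment)
    print(task_id)  # '35573', then '838d1308-a8f4-402c-95cb-7823f3806cd8'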
+
+def install_collections(
+ collections, # type: Iterable[Requirement]
+ output_path, # type: str
+ apis, # type: Iterable[GalaxyAPI]
+ ignore_errors, # type: bool
+ no_deps, # type: bool
+ force, # type: bool
+ force_deps, # type: bool
+ upgrade, # type: bool
+ allow_pre_release, # type: bool
+ artifacts_manager, # type: ConcreteArtifactsManager
+): # type: (...) -> None
+ """Install Ansible collections to the path specified.
+
+ :param collections: The collections to install.
+ :param output_path: The path to install the collections to.
+ :param apis: A list of GalaxyAPIs to query when searching for a collection.
+    :param artifacts_manager: Artifacts manager.
+ :param ignore_errors: Whether to ignore any errors when installing the collection.
+ :param no_deps: Ignore any collection dependencies and only install the base requirements.
+ :param force: Re-install a collection if it has already been installed.
+ :param force_deps: Re-install a collection as well as its dependencies if they have already been installed.
+ """
+ existing_collections = {
+ Requirement(coll.fqcn, coll.ver, coll.src, coll.type)
+ for coll in find_existing_collections(output_path, artifacts_manager)
+ }
+
+ unsatisfied_requirements = set(
+ chain.from_iterable(
+ (
+ Requirement.from_dir_path(sub_coll, artifacts_manager)
+ for sub_coll in (
+ artifacts_manager.
+ get_direct_collection_dependencies(install_req).
+ keys()
+ )
+ )
+ if install_req.is_subdirs else (install_req, )
+ for install_req in collections
+ ),
+ )
+ requested_requirements_names = {req.fqcn for req in unsatisfied_requirements}
+
+ # NOTE: Don't attempt to reevaluate already installed deps
+ # NOTE: unless `--force` or `--force-with-deps` is passed
+ unsatisfied_requirements -= set() if force or force_deps else {
+ req
+ for req in unsatisfied_requirements
+ for exs in existing_collections
+ if req.fqcn == exs.fqcn and meets_requirements(exs.ver, req.ver)
+ }
+
+ if not unsatisfied_requirements and not upgrade:
+ display.display(
+ 'Nothing to do. All requested collections are already '
+ 'installed. If you want to reinstall them, '
+ 'consider using `--force`.'
+ )
+ return
+
+ # FIXME: This probably needs to be improved to
+ # FIXME: properly match differing src/type.
+ existing_non_requested_collections = {
+ coll for coll in existing_collections
+ if coll.fqcn not in requested_requirements_names
+ }
+
+ preferred_requirements = (
+ [] if force_deps
+ else existing_non_requested_collections if force
+ else existing_collections
+ )
+ preferred_collections = {
+ Candidate(coll.fqcn, coll.ver, coll.src, coll.type)
+ for coll in preferred_requirements
+ }
+ with _display_progress("Process install dependency map"):
+ dependency_map = _resolve_depenency_map(
+ collections,
+ galaxy_apis=apis,
+ preferred_candidates=preferred_collections,
+ concrete_artifacts_manager=artifacts_manager,
+ no_deps=no_deps,
+ allow_pre_release=allow_pre_release,
+ upgrade=upgrade,
+ )
+
+ with _display_progress("Starting collection install process"):
+ for fqcn, concrete_coll_pin in dependency_map.items():
+ if concrete_coll_pin.is_virtual:
+ display.vvvv(
+ "Skipping '{coll!s}' as it is virtual".
+ format(coll=to_text(concrete_coll_pin)),
+ )
+ continue
+
+ if concrete_coll_pin in preferred_collections:
+ display.display(
+ "Skipping '{coll!s}' as it is already installed".
+ format(coll=to_text(concrete_coll_pin)),
+ )
+ continue
+
+ try:
+ install(concrete_coll_pin, output_path, artifacts_manager)
+ except AnsibleError as err:
+ if ignore_errors:
+ display.warning(
+ 'Failed to install collection {coll!s} but skipping '
+ 'due to --ignore-errors being set. Error: {error!s}'.
+ format(
+ coll=to_text(concrete_coll_pin),
+ error=to_text(err),
+ )
+ )
+ else:
+ raise
+
+
+# NOTE: imported in ansible.cli.galaxy
+def validate_collection_name(name): # type: (str) -> str
+ """Validates the collection name as an input from the user or a requirements file fit the requirements.
+
+ :param name: The input name with optional range specifier split by ':'.
+ :return: The input value, required for argparse validation.
+ """
+ collection, dummy, dummy = name.partition(':')
+ if AnsibleCollectionRef.is_valid_collection_name(collection):
+ return name
+
+ raise AnsibleError("Invalid collection name '%s', "
+ "name must be in the format <namespace>.<collection>. \n"
+                       "Please make sure the namespace and collection name contain "
+ "characters from [a-zA-Z0-9_] only." % name)
+
+
+# NOTE: imported in ansible.cli.galaxy
+def validate_collection_path(collection_path): # type: (str) -> str
+ """Ensure a given path ends with 'ansible_collections'
+
+ :param collection_path: The path that should end in 'ansible_collections'
+    :return: The collection_path, with 'ansible_collections' appended if it does not already end with it.
+ """
+
+ if os.path.split(collection_path)[1] != 'ansible_collections':
+ return os.path.join(collection_path, 'ansible_collections')
+
+ return collection_path
+
+
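A quick sketch of what the two validators above accept and return; the inputs are made up for the example:

from ansible.galaxy.collection import validate_collection_name, validate_collection_path

print(validate_collection_name('community.general:>=3.0.0'))  # returned unchanged
print(validate_collection_path('/home/user/.ansible/collections'))
# -> /home/user/.ansible/collections/ansible_collections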
+def verify_collections(
+ collections, # type: Iterable[Requirement]
+ search_paths, # type: Iterable[str]
+ apis, # type: Iterable[GalaxyAPI]
+ ignore_errors, # type: bool
+ local_verify_only, # type: bool
+ artifacts_manager, # type: ConcreteArtifactsManager
+): # type: (...) -> List[CollectionVerifyResult]
+ r"""Verify the integrity of locally installed collections.
+
+ :param collections: The collections to check.
+ :param search_paths: Locations for the local collection lookup.
+ :param apis: A list of GalaxyAPIs to query when searching for a collection.
+ :param ignore_errors: Whether to ignore any errors when verifying the collection.
+ :param local_verify_only: When True, skip downloads and only verify local manifests.
+ :param artifacts_manager: Artifacts manager.
+ :return: list of CollectionVerifyResult objects describing the results of each collection verification
+ """
+ results = [] # type: List[CollectionVerifyResult]
+
+ api_proxy = MultiGalaxyAPIProxy(apis, artifacts_manager)
+
+ with _display_progress():
+ for collection in collections:
+ try:
+ if collection.is_concrete_artifact:
+ raise AnsibleError(
+ message="'{coll_type!s}' type is not supported. "
+ 'The format namespace.name is expected.'.
+ format(coll_type=collection.type)
+ )
+
+ # NOTE: Verify local collection exists before
+ # NOTE: downloading its source artifact from
+ # NOTE: a galaxy server.
+ default_err = 'Collection %s is not installed in any of the collection paths.' % collection.fqcn
+ for search_path in search_paths:
+ b_search_path = to_bytes(
+ os.path.join(
+ search_path,
+ collection.namespace, collection.name,
+ ),
+ errors='surrogate_or_strict',
+ )
+ if not os.path.isdir(b_search_path):
+ continue
+ if not _is_installed_collection_dir(b_search_path):
+ default_err = (
+ "Collection %s does not have a MANIFEST.json. "
+ "A MANIFEST.json is expected if the collection has been built "
+ "and installed via ansible-galaxy" % collection.fqcn
+ )
+ continue
+
+ local_collection = Candidate.from_dir_path(
+ b_search_path, artifacts_manager,
+ )
+ break
+ else:
+ raise AnsibleError(message=default_err)
+
+ if local_verify_only:
+ remote_collection = None
+ else:
+ remote_collection = Candidate(
+ collection.fqcn,
+ collection.ver if collection.ver != '*'
+ else local_collection.ver,
+ None, 'galaxy',
+ )
+
+ # Download collection on a galaxy server for comparison
+ try:
+ # NOTE: Trigger the lookup. If found, it'll cache
+ # NOTE: download URL and token in artifact manager.
+ api_proxy.get_collection_version_metadata(
+ remote_collection,
+ )
+ except AnsibleError as e: # FIXME: does this actually emit any errors?
+ # FIXME: extract the actual message and adjust this:
+ expected_error_msg = (
+ 'Failed to find collection {coll.fqcn!s}:{coll.ver!s}'.
+ format(coll=collection)
+ )
+ if e.message == expected_error_msg:
+ raise AnsibleError(
+ 'Failed to find remote collection '
+ "'{coll!s}' on any of the galaxy servers".
+ format(coll=collection)
+ )
+ raise
+
+ result = verify_local_collection(
+ local_collection, remote_collection,
+ artifacts_manager,
+ )
+
+ results.append(result)
+
+ except AnsibleError as err:
+ if ignore_errors:
+ display.warning(
+ "Failed to verify collection '{coll!s}' but skipping "
+ 'due to --ignore-errors being set. '
+ 'Error: {err!s}'.
+ format(coll=collection, err=to_text(err)),
+ )
+ else:
+ raise
+
+ return results
+
+
+@contextmanager
+def _tempdir():
+ b_temp_path = tempfile.mkdtemp(dir=to_bytes(C.DEFAULT_LOCAL_TMP, errors='surrogate_or_strict'))
+ try:
+ yield b_temp_path
+ finally:
+ shutil.rmtree(b_temp_path)
+
+
+@contextmanager
+def _display_progress(msg=None):
+ config_display = C.GALAXY_DISPLAY_PROGRESS
+ display_wheel = sys.stdout.isatty() if config_display is None else config_display
+
+ global display
+ if msg is not None:
+ display.display(msg)
+
+ if not display_wheel:
+ yield
+ return
+
+ def progress(display_queue, actual_display):
+ actual_display.debug("Starting display_progress display thread")
+ t = threading.current_thread()
+
+ while True:
+ for c in "|/-\\":
+ actual_display.display(c + "\b", newline=False)
+ time.sleep(0.1)
+
+ # Display a message from the main thread
+ while True:
+ try:
+ method, args, kwargs = display_queue.get(block=False, timeout=0.1)
+ except queue.Empty:
+ break
+ else:
+ func = getattr(actual_display, method)
+ func(*args, **kwargs)
+
+ if getattr(t, "finish", False):
+ actual_display.debug("Received end signal for display_progress display thread")
+ return
+
+ class DisplayThread(object):
+
+ def __init__(self, display_queue):
+ self.display_queue = display_queue
+
+ def __getattr__(self, attr):
+ def call_display(*args, **kwargs):
+ self.display_queue.put((attr, args, kwargs))
+
+ return call_display
+
+    # Temporarily override the global display class with our own, which adds the calls to a queue for the thread to process.
+ old_display = display
+ try:
+ display_queue = queue.Queue()
+ display = DisplayThread(display_queue)
+ t = threading.Thread(target=progress, args=(display_queue, old_display))
+ t.daemon = True
+ t.start()
+
+ try:
+ yield
+ finally:
+ t.finish = True
+ t.join()
+ except Exception:
+        # The exception is re-raised so we can be sure the thread is finished and no longer using the display
+ raise
+ finally:
+ display = old_display
+
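A minimal usage sketch of the context manager above, as called from within this module: the spinner thread only runs when stdout is a TTY (or GALAXY_DISPLAY_PROGRESS forces it on), and display calls made inside the block are proxied through the queue so they do not collide with the spinner output:

with _display_progress("Starting a long-running Galaxy operation"):
    # work happens here; display.display()/display.vvv() calls are queued
    time.sleep(1)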
+
+def _verify_file_hash(b_path, filename, expected_hash, error_queue):
+ b_file_path = to_bytes(os.path.join(to_text(b_path), filename), errors='surrogate_or_strict')
+
+ if not os.path.isfile(b_file_path):
+ actual_hash = None
+ else:
+ with open(b_file_path, mode='rb') as file_object:
+ actual_hash = _consume_file(file_object)
+
+ if expected_hash != actual_hash:
+ error_queue.append(ModifiedContent(filename=filename, expected=expected_hash, installed=actual_hash))
+
+
+def _build_files_manifest(b_collection_path, namespace, name, ignore_patterns):
+ # type: (bytes, str, str, List[str]) -> FilesManifestType
+ # We always ignore .pyc and .retry files as well as some well known version control directories. The ignore
+ # patterns can be extended by the build_ignore key in galaxy.yml
+ b_ignore_patterns = [
+ b'galaxy.yml',
+ b'galaxy.yaml',
+ b'.git',
+ b'*.pyc',
+ b'*.retry',
+ b'tests/output', # Ignore ansible-test result output directory.
+ to_bytes('{0}-{1}-*.tar.gz'.format(namespace, name)), # Ignores previously built artifacts in the root dir.
+ ]
+ b_ignore_patterns += [to_bytes(p) for p in ignore_patterns]
+ b_ignore_dirs = frozenset([b'CVS', b'.bzr', b'.hg', b'.git', b'.svn', b'__pycache__', b'.tox'])
+
+ entry_template = {
+ 'name': None,
+ 'ftype': None,
+ 'chksum_type': None,
+ 'chksum_sha256': None,
+ 'format': MANIFEST_FORMAT
+ }
+ manifest = {
+ 'files': [
+ {
+ 'name': '.',
+ 'ftype': 'dir',
+ 'chksum_type': None,
+ 'chksum_sha256': None,
+ 'format': MANIFEST_FORMAT,
+ },
+ ],
+ 'format': MANIFEST_FORMAT,
+ } # type: FilesManifestType
+
+ def _walk(b_path, b_top_level_dir):
+ for b_item in os.listdir(b_path):
+ b_abs_path = os.path.join(b_path, b_item)
+ b_rel_base_dir = b'' if b_path == b_top_level_dir else b_path[len(b_top_level_dir) + 1:]
+ b_rel_path = os.path.join(b_rel_base_dir, b_item)
+ rel_path = to_text(b_rel_path, errors='surrogate_or_strict')
+
+ if os.path.isdir(b_abs_path):
+ if any(b_item == b_path for b_path in b_ignore_dirs) or \
+ any(fnmatch.fnmatch(b_rel_path, b_pattern) for b_pattern in b_ignore_patterns):
+ display.vvv("Skipping '%s' for collection build" % to_text(b_abs_path))
+ continue
+
+ if os.path.islink(b_abs_path):
+ b_link_target = os.path.realpath(b_abs_path)
+
+ if not _is_child_path(b_link_target, b_top_level_dir):
+ display.warning("Skipping '%s' as it is a symbolic link to a directory outside the collection"
+ % to_text(b_abs_path))
+ continue
+
+ manifest_entry = entry_template.copy()
+ manifest_entry['name'] = rel_path
+ manifest_entry['ftype'] = 'dir'
+
+ manifest['files'].append(manifest_entry)
+
+ if not os.path.islink(b_abs_path):
+ _walk(b_abs_path, b_top_level_dir)
+ else:
+ if any(fnmatch.fnmatch(b_rel_path, b_pattern) for b_pattern in b_ignore_patterns):
+ display.vvv("Skipping '%s' for collection build" % to_text(b_abs_path))
+ continue
+
+                # Handling of file symlinks occurs in _build_collection_tar; the manifest entry for a symlink is the
+                # same as for a normal file.
+ manifest_entry = entry_template.copy()
+ manifest_entry['name'] = rel_path
+ manifest_entry['ftype'] = 'file'
+ manifest_entry['chksum_type'] = 'sha256'
+ manifest_entry['chksum_sha256'] = secure_hash(b_abs_path, hash_func=sha256)
+
+ manifest['files'].append(manifest_entry)
+
+ _walk(b_collection_path, b_collection_path)
+
+ return manifest
+
+
+# FIXME: accept a dict produced from `galaxy.yml` instead of separate args
+def _build_manifest(namespace, name, version, authors, readme, tags, description, license_file,
+ dependencies, repository, documentation, homepage, issues, **kwargs):
+ manifest = {
+ 'collection_info': {
+ 'namespace': namespace,
+ 'name': name,
+ 'version': version,
+ 'authors': authors,
+ 'readme': readme,
+ 'tags': tags,
+ 'description': description,
+ 'license': kwargs['license'],
+ 'license_file': license_file or None, # Handle galaxy.yml having an empty string (None)
+ 'dependencies': dependencies,
+ 'repository': repository,
+ 'documentation': documentation,
+ 'homepage': homepage,
+ 'issues': issues,
+ },
+ 'file_manifest_file': {
+ 'name': 'FILES.json',
+ 'ftype': 'file',
+ 'chksum_type': 'sha256',
+ 'chksum_sha256': None, # Filled out in _build_collection_tar
+ 'format': MANIFEST_FORMAT
+ },
+ 'format': MANIFEST_FORMAT,
+ }
+
+ return manifest
+
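Serialized with json.dumps() (as _build_collection_tar does below), the returned structure becomes the collection's MANIFEST.json. An abridged, hand-written illustration with made-up values:

# {
#   "collection_info": {
#     "namespace": "acme", "name": "demo", "version": "1.0.0",
#     "dependencies": {"ansible.utils": ">=2.0.0"}, ...
#   },
#   "file_manifest_file": {
#     "name": "FILES.json", "ftype": "file",
#     "chksum_type": "sha256", "chksum_sha256": null, "format": 1
#   },
#   "format": 1
# }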
+
+def _build_collection_tar(
+ b_collection_path, # type: bytes
+ b_tar_path, # type: bytes
+ collection_manifest, # type: CollectionManifestType
+ file_manifest, # type: FilesManifestType
+): # type: (...) -> Text
+ """Build a tar.gz collection artifact from the manifest data."""
+ files_manifest_json = to_bytes(json.dumps(file_manifest, indent=True), errors='surrogate_or_strict')
+ collection_manifest['file_manifest_file']['chksum_sha256'] = secure_hash_s(files_manifest_json, hash_func=sha256)
+ collection_manifest_json = to_bytes(json.dumps(collection_manifest, indent=True), errors='surrogate_or_strict')
+
+ with _tempdir() as b_temp_path:
+ b_tar_filepath = os.path.join(b_temp_path, os.path.basename(b_tar_path))
+
+ with tarfile.open(b_tar_filepath, mode='w:gz') as tar_file:
+ # Add the MANIFEST.json and FILES.json file to the archive
+ for name, b in [(MANIFEST_FILENAME, collection_manifest_json), ('FILES.json', files_manifest_json)]:
+ b_io = BytesIO(b)
+ tar_info = tarfile.TarInfo(name)
+ tar_info.size = len(b)
+ tar_info.mtime = int(time.time())
+ tar_info.mode = 0o0644
+ tar_file.addfile(tarinfo=tar_info, fileobj=b_io)
+
+ for file_info in file_manifest['files']: # type: ignore[union-attr]
+ if file_info['name'] == '.':
+ continue
+
+ # arcname expects a native string, cannot be bytes
+ filename = to_native(file_info['name'], errors='surrogate_or_strict')
+ b_src_path = os.path.join(b_collection_path, to_bytes(filename, errors='surrogate_or_strict'))
+
+ def reset_stat(tarinfo):
+ if tarinfo.type != tarfile.SYMTYPE:
+ existing_is_exec = tarinfo.mode & stat.S_IXUSR
+ tarinfo.mode = 0o0755 if existing_is_exec or tarinfo.isdir() else 0o0644
+ tarinfo.uid = tarinfo.gid = 0
+ tarinfo.uname = tarinfo.gname = ''
+
+ return tarinfo
+
+ if os.path.islink(b_src_path):
+ b_link_target = os.path.realpath(b_src_path)
+ if _is_child_path(b_link_target, b_collection_path):
+ b_rel_path = os.path.relpath(b_link_target, start=os.path.dirname(b_src_path))
+
+ tar_info = tarfile.TarInfo(filename)
+ tar_info.type = tarfile.SYMTYPE
+ tar_info.linkname = to_native(b_rel_path, errors='surrogate_or_strict')
+ tar_info = reset_stat(tar_info)
+ tar_file.addfile(tarinfo=tar_info)
+
+ continue
+
+ # Dealing with a normal file, just add it by name.
+ tar_file.add(
+ to_native(os.path.realpath(b_src_path)),
+ arcname=filename,
+ recursive=False,
+ filter=reset_stat,
+ )
+
+ shutil.copy(to_native(b_tar_filepath), to_native(b_tar_path))
+ collection_name = "%s.%s" % (collection_manifest['collection_info']['namespace'],
+ collection_manifest['collection_info']['name'])
+ tar_path = to_text(b_tar_path)
+ display.display(u'Created collection for %s at %s' % (collection_name, tar_path))
+ return tar_path
+
+
+def _build_collection_dir(b_collection_path, b_collection_output, collection_manifest, file_manifest):
+ """Build a collection directory from the manifest data.
+
+ This should follow the same pattern as _build_collection_tar.
+ """
+ os.makedirs(b_collection_output, mode=0o0755)
+
+ files_manifest_json = to_bytes(json.dumps(file_manifest, indent=True), errors='surrogate_or_strict')
+ collection_manifest['file_manifest_file']['chksum_sha256'] = secure_hash_s(files_manifest_json, hash_func=sha256)
+ collection_manifest_json = to_bytes(json.dumps(collection_manifest, indent=True), errors='surrogate_or_strict')
+
+ # Write contents to the files
+ for name, b in [(MANIFEST_FILENAME, collection_manifest_json), ('FILES.json', files_manifest_json)]:
+ b_path = os.path.join(b_collection_output, to_bytes(name, errors='surrogate_or_strict'))
+ with open(b_path, 'wb') as file_obj, BytesIO(b) as b_io:
+ shutil.copyfileobj(b_io, file_obj)
+
+ os.chmod(b_path, 0o0644)
+
+ base_directories = []
+ for file_info in sorted(file_manifest['files'], key=lambda x: x['name']):
+ if file_info['name'] == '.':
+ continue
+
+ src_file = os.path.join(b_collection_path, to_bytes(file_info['name'], errors='surrogate_or_strict'))
+ dest_file = os.path.join(b_collection_output, to_bytes(file_info['name'], errors='surrogate_or_strict'))
+
+ existing_is_exec = os.stat(src_file).st_mode & stat.S_IXUSR
+ mode = 0o0755 if existing_is_exec else 0o0644
+
+ if os.path.isdir(src_file):
+ mode = 0o0755
+ base_directories.append(src_file)
+ os.mkdir(dest_file, mode)
+ else:
+ shutil.copyfile(src_file, dest_file)
+
+ os.chmod(dest_file, mode)
+ collection_output = to_text(b_collection_output)
+ return collection_output
+
+
+def find_existing_collections(path, artifacts_manager):
+ """Locate all collections under a given path.
+
+ :param path: Collection dirs layout search path.
+ :param artifacts_manager: Artifacts manager.
+ """
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+
+ # FIXME: consider using `glob.glob()` to simplify looping
+ for b_namespace in os.listdir(b_path):
+ b_namespace_path = os.path.join(b_path, b_namespace)
+ if os.path.isfile(b_namespace_path):
+ continue
+
+ # FIXME: consider feeding b_namespace_path to Candidate.from_dir_path to get subdirs automatically
+ for b_collection in os.listdir(b_namespace_path):
+ b_collection_path = os.path.join(b_namespace_path, b_collection)
+ if not os.path.isdir(b_collection_path):
+ continue
+
+ try:
+ req = Candidate.from_dir_path_as_unknown(
+ b_collection_path,
+ artifacts_manager,
+ )
+ except ValueError as val_err:
+ raise_from(AnsibleError(val_err), val_err)
+
+ display.vvv(
+ u"Found installed collection {coll!s} at '{path!s}'".
+ format(coll=to_text(req), path=to_text(req.src))
+ )
+ yield req
+
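A sketch of iterating the generator above over one of Ansible's default collection trees; artifacts_manager is assumed to be an already-constructed ConcreteArtifactsManager:

for candidate in find_existing_collections(
        '/usr/share/ansible/collections/ansible_collections',
        artifacts_manager,
):
    print(candidate.fqcn, candidate.ver)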
+
+def install(collection, path, artifacts_manager): # FIXME: mv to dataclasses?
+ # type: (Candidate, str, ConcreteArtifactsManager) -> None
+ """Install a collection under a given path.
+
+ :param collection: Collection to be installed.
+ :param path: Collection dirs layout path.
+ :param artifacts_manager: Artifacts manager.
+ """
+ b_artifact_path = (
+ artifacts_manager.get_artifact_path if collection.is_concrete_artifact
+ else artifacts_manager.get_galaxy_artifact_path
+ )(collection)
+
+ collection_path = os.path.join(path, collection.namespace, collection.name)
+ b_collection_path = to_bytes(collection_path, errors='surrogate_or_strict')
+ display.display(
+ u"Installing '{coll!s}' to '{path!s}'".
+ format(coll=to_text(collection), path=collection_path),
+ )
+
+ if os.path.exists(b_collection_path):
+ shutil.rmtree(b_collection_path)
+
+ if collection.is_dir:
+ install_src(collection, b_artifact_path, b_collection_path, artifacts_manager)
+ else:
+ install_artifact(b_artifact_path, b_collection_path, artifacts_manager._b_working_directory)
+
+ display.display(
+ '{coll!s} was installed successfully'.
+ format(coll=to_text(collection)),
+ )
+
+
+def install_artifact(b_coll_targz_path, b_collection_path, b_temp_path):
+ """Install a collection from tarball under a given path.
+
+ :param b_coll_targz_path: Collection tarball to be installed.
+ :param b_collection_path: Collection dirs layout path.
+ :param b_temp_path: Temporary dir path.
+ """
+ try:
+ with tarfile.open(b_coll_targz_path, mode='r') as collection_tar:
+ files_member_obj = collection_tar.getmember('FILES.json')
+ with _tarfile_extract(collection_tar, files_member_obj) as (dummy, files_obj):
+ files = json.loads(to_text(files_obj.read(), errors='surrogate_or_strict'))
+
+ _extract_tar_file(collection_tar, MANIFEST_FILENAME, b_collection_path, b_temp_path)
+ _extract_tar_file(collection_tar, 'FILES.json', b_collection_path, b_temp_path)
+
+ for file_info in files['files']:
+ file_name = file_info['name']
+ if file_name == '.':
+ continue
+
+ if file_info['ftype'] == 'file':
+ _extract_tar_file(collection_tar, file_name, b_collection_path, b_temp_path,
+ expected_hash=file_info['chksum_sha256'])
+
+ else:
+ _extract_tar_dir(collection_tar, file_name, b_collection_path)
+
+ except Exception:
+ # Ensure we don't leave the dir behind in case of a failure.
+ shutil.rmtree(b_collection_path)
+
+ b_namespace_path = os.path.dirname(b_collection_path)
+ if not os.listdir(b_namespace_path):
+ os.rmdir(b_namespace_path)
+
+ raise
+
+
+def install_src(
+ collection,
+ b_collection_path, b_collection_output_path,
+ artifacts_manager,
+):
+ r"""Install the collection from source control into given dir.
+
+ Generates the Ansible collection artifact data from a galaxy.yml and
+ installs the artifact to a directory.
+ This should follow the same pattern as build_collection, but instead
+ of creating an artifact, install it.
+
+ :param collection: Collection to be installed.
+ :param b_collection_path: Collection dirs layout path.
+ :param b_collection_output_path: The installation directory for the \
+ collection artifact.
+ :param artifacts_manager: Artifacts manager.
+
+ :raises AnsibleError: If no collection metadata found.
+ """
+ collection_meta = artifacts_manager.get_direct_collection_meta(collection)
+
+ if 'build_ignore' not in collection_meta: # installed collection, not src
+ # FIXME: optimize this? use a different process? copy instead of build?
+ collection_meta['build_ignore'] = []
+ collection_manifest = _build_manifest(**collection_meta)
+ file_manifest = _build_files_manifest(
+ b_collection_path,
+ collection_meta['namespace'], collection_meta['name'],
+ collection_meta['build_ignore'],
+ )
+
+ collection_output_path = _build_collection_dir(
+ b_collection_path, b_collection_output_path,
+ collection_manifest, file_manifest,
+ )
+
+ display.display(
+ 'Created collection for {coll!s} at {path!s}'.
+ format(coll=collection, path=collection_output_path)
+ )
+
+
+def _extract_tar_dir(tar, dirname, b_dest):
+ """ Extracts a directory from a collection tar. """
+ member_names = [to_native(dirname, errors='surrogate_or_strict')]
+
+ # Create list of members with and without trailing separator
+ if not member_names[-1].endswith(os.path.sep):
+ member_names.append(member_names[-1] + os.path.sep)
+
+    # Try all of the member names and stop on the first one we are able to get successfully
+ for member in member_names:
+ try:
+ tar_member = tar.getmember(member)
+ except KeyError:
+ continue
+ break
+ else:
+ # If we still can't find the member, raise a nice error.
+ raise AnsibleError("Unable to extract '%s' from collection" % to_native(member, errors='surrogate_or_strict'))
+
+ b_dir_path = os.path.join(b_dest, to_bytes(dirname, errors='surrogate_or_strict'))
+
+ b_parent_path = os.path.dirname(b_dir_path)
+ try:
+ os.makedirs(b_parent_path, mode=0o0755)
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+
+ if tar_member.type == tarfile.SYMTYPE:
+ b_link_path = to_bytes(tar_member.linkname, errors='surrogate_or_strict')
+ if not _is_child_path(b_link_path, b_dest, link_name=b_dir_path):
+ raise AnsibleError("Cannot extract symlink '%s' in collection: path points to location outside of "
+ "collection '%s'" % (to_native(dirname), b_link_path))
+
+ os.symlink(b_link_path, b_dir_path)
+
+ else:
+ if not os.path.isdir(b_dir_path):
+ os.mkdir(b_dir_path, 0o0755)
+
+
+def _extract_tar_file(tar, filename, b_dest, b_temp_path, expected_hash=None):
+ """ Extracts a file from a collection tar. """
+ with _get_tar_file_member(tar, filename) as (tar_member, tar_obj):
+ if tar_member.type == tarfile.SYMTYPE:
+ actual_hash = _consume_file(tar_obj)
+
+ else:
+ with tempfile.NamedTemporaryFile(dir=b_temp_path, delete=False) as tmpfile_obj:
+ actual_hash = _consume_file(tar_obj, tmpfile_obj)
+
+ if expected_hash and actual_hash != expected_hash:
+ raise AnsibleError("Checksum mismatch for '%s' inside collection at '%s'"
+ % (to_native(filename, errors='surrogate_or_strict'), to_native(tar.name)))
+
+ b_dest_filepath = os.path.abspath(os.path.join(b_dest, to_bytes(filename, errors='surrogate_or_strict')))
+ b_parent_dir = os.path.dirname(b_dest_filepath)
+ if not _is_child_path(b_parent_dir, b_dest):
+ raise AnsibleError("Cannot extract tar entry '%s' as it will be placed outside the collection directory"
+ % to_native(filename, errors='surrogate_or_strict'))
+
+ if not os.path.exists(b_parent_dir):
+ # Seems like Galaxy does not validate if all file entries have a corresponding dir ftype entry. This check
+ # makes sure we create the parent directory even if it wasn't set in the metadata.
+ os.makedirs(b_parent_dir, mode=0o0755)
+
+ if tar_member.type == tarfile.SYMTYPE:
+ b_link_path = to_bytes(tar_member.linkname, errors='surrogate_or_strict')
+ if not _is_child_path(b_link_path, b_dest, link_name=b_dest_filepath):
+ raise AnsibleError("Cannot extract symlink '%s' in collection: path points to location outside of "
+ "collection '%s'" % (to_native(filename), b_link_path))
+
+ os.symlink(b_link_path, b_dest_filepath)
+
+ else:
+ shutil.move(to_bytes(tmpfile_obj.name, errors='surrogate_or_strict'), b_dest_filepath)
+
+ # Default to rw-r--r-- and only add execute if the tar file has execute.
+ tar_member = tar.getmember(to_native(filename, errors='surrogate_or_strict'))
+ new_mode = 0o644
+ if stat.S_IMODE(tar_member.mode) & stat.S_IXUSR:
+ new_mode |= 0o0111
+
+ os.chmod(b_dest_filepath, new_mode)
+
+
+def _get_tar_file_member(tar, filename):
+ n_filename = to_native(filename, errors='surrogate_or_strict')
+ try:
+ member = tar.getmember(n_filename)
+ except KeyError:
+ raise AnsibleError("Collection tar at '%s' does not contain the expected file '%s'." % (
+ to_native(tar.name),
+ n_filename))
+
+ return _tarfile_extract(tar, member)
+
+
+def _get_json_from_tar_file(b_path, filename):
+ file_contents = ''
+
+ with tarfile.open(b_path, mode='r') as collection_tar:
+ with _get_tar_file_member(collection_tar, filename) as (dummy, tar_obj):
+ bufsize = 65536
+ data = tar_obj.read(bufsize)
+ while data:
+ file_contents += to_text(data)
+ data = tar_obj.read(bufsize)
+
+ return json.loads(file_contents)
+
+
+def _get_tar_file_hash(b_path, filename):
+ with tarfile.open(b_path, mode='r') as collection_tar:
+ with _get_tar_file_member(collection_tar, filename) as (dummy, tar_obj):
+ return _consume_file(tar_obj)
+
+
+def _get_file_hash(b_path, filename): # type: (bytes, str) -> str
+ filepath = os.path.join(b_path, to_bytes(filename, errors='surrogate_or_strict'))
+ with open(filepath, 'rb') as fp:
+ return _consume_file(fp)
+
+
+def _is_child_path(path, parent_path, link_name=None):
+ """ Checks that path is a path within the parent_path specified. """
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+
+ if link_name and not os.path.isabs(b_path):
+ # If link_name is specified, path is the source of the link and we need to resolve the absolute path.
+ b_link_dir = os.path.dirname(to_bytes(link_name, errors='surrogate_or_strict'))
+ b_path = os.path.abspath(os.path.join(b_link_dir, b_path))
+
+ b_parent_path = to_bytes(parent_path, errors='surrogate_or_strict')
+ return b_path == b_parent_path or b_path.startswith(b_parent_path + to_bytes(os.path.sep))
+
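A few hand-picked calls (paths are illustrative, made from within this module) showing how the helper above guards against path traversal, including the trailing-separator check that stops '/dest/ns/coll-evil' from matching '/dest/ns/coll':

print(_is_child_path(b'/dest/ns/coll/plugins/modules', b'/dest/ns/coll'))  # True
print(_is_child_path(b'/dest/ns/coll-evil', b'/dest/ns/coll'))             # False
print(_is_child_path(b'../../etc/passwd', b'/dest/ns/coll',
                     link_name=b'/dest/ns/coll/roles/link'))               # False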
+
+def _resolve_depenency_map(
+ requested_requirements, # type: Iterable[Requirement]
+ galaxy_apis, # type: Iterable[GalaxyAPI]
+ concrete_artifacts_manager, # type: ConcreteArtifactsManager
+ preferred_candidates, # type: Optional[Iterable[Candidate]]
+ no_deps, # type: bool
+ allow_pre_release, # type: bool
+ upgrade, # type: bool
+): # type: (...) -> Dict[str, Candidate]
+ """Return the resolved dependency map."""
+ collection_dep_resolver = build_collection_dependency_resolver(
+ galaxy_apis=galaxy_apis,
+ concrete_artifacts_manager=concrete_artifacts_manager,
+ user_requirements=requested_requirements,
+ preferred_candidates=preferred_candidates,
+ with_deps=not no_deps,
+ with_pre_releases=allow_pre_release,
+ upgrade=upgrade,
+ )
+ try:
+ return collection_dep_resolver.resolve(
+ requested_requirements,
+ max_rounds=2000000, # NOTE: same constant pip uses
+ ).mapping
+ except CollectionDependencyResolutionImpossible as dep_exc:
+ conflict_causes = (
+ '* {req.fqcn!s}:{req.ver!s} ({dep_origin!s})'.format(
+ req=req_inf.requirement,
+ dep_origin='direct request'
+ if req_inf.parent is None
+ else 'dependency of {parent!s}'.
+ format(parent=req_inf.parent),
+ )
+ for req_inf in dep_exc.causes
+ )
+ error_msg_lines = chain(
+ (
+ 'Failed to resolve the requested '
+ 'dependencies map. Could not satisfy the following '
+ 'requirements:',
+ ),
+ conflict_causes,
+ )
+ raise raise_from( # NOTE: Leading "raise" is a hack for mypy bug #9717
+ AnsibleError('\n'.join(error_msg_lines)),
+ dep_exc,
+ )
+ except CollectionDependencyInconsistentCandidate as dep_exc:
+ parents = [
+ "%s.%s:%s" % (p.namespace, p.name, p.ver)
+ for p in dep_exc.criterion.iter_parent()
+ if p is not None
+ ]
+
+ error_msg_lines = [
+ (
+ 'Failed to resolve the requested dependencies map. '
+ 'Got the candidate {req.fqcn!s}:{req.ver!s} ({dep_origin!s}) '
+ 'which didn\'t satisfy all of the following requirements:'.
+ format(
+ req=dep_exc.candidate,
+ dep_origin='direct request'
+ if not parents else 'dependency of {parent!s}'.
+ format(parent=', '.join(parents))
+ )
+ )
+ ]
+
+ for req in dep_exc.criterion.iter_requirement():
+ error_msg_lines.append(
+ '* {req.fqcn!s}:{req.ver!s}'.format(req=req)
+ )
+
+ raise raise_from( # NOTE: Leading "raise" is a hack for mypy bug #9717
+ AnsibleError('\n'.join(error_msg_lines)),
+ dep_exc,
+ )
diff --git a/lib/ansible/galaxy/collection/concrete_artifact_manager.py b/lib/ansible/galaxy/collection/concrete_artifact_manager.py
new file mode 100644
index 00000000..b2550abd
--- /dev/null
+++ b/lib/ansible/galaxy/collection/concrete_artifact_manager.py
@@ -0,0 +1,654 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020-2021, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""Concrete collection candidate management helper module."""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import tarfile
+import subprocess
+from contextlib import contextmanager
+from hashlib import sha256
+from shutil import rmtree
+from tempfile import mkdtemp
+
+try:
+ from typing import TYPE_CHECKING
+except ImportError:
+ TYPE_CHECKING = False
+
+if TYPE_CHECKING:
+ from typing import (
+ Any, # FIXME: !!!111
+ BinaryIO, Dict, IO,
+ Iterator, List, Optional,
+ Set, Tuple, Type, Union,
+ )
+
+ from ansible.galaxy.dependency_resolution.dataclasses import (
+ Candidate, Requirement,
+ )
+ from ansible.galaxy.token import GalaxyToken
+
+from ansible.errors import AnsibleError
+from ansible.galaxy import get_collections_galaxy_meta_info
+from ansible.galaxy.dependency_resolution.dataclasses import _GALAXY_YAML
+from ansible.galaxy.user_agent import user_agent
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.six.moves.urllib.error import URLError
+from ansible.module_utils.six.moves.urllib.parse import urldefrag
+from ansible.module_utils.six import raise_from
+from ansible.module_utils.urls import open_url
+from ansible.utils.display import Display
+
+import yaml
+
+
+display = Display()
+
+MANIFEST_FILENAME = 'MANIFEST.json'
+
+
+class ConcreteArtifactsManager:
+ """Manager for on-disk collection artifacts.
+
+ It is responsible for:
+ * downloading remote collections from Galaxy-compatible servers and
+ direct links to tarballs or SCM repositories
+ * keeping track of local ones
+        * keeping track of Galaxy API tokens for downloads from Galaxy-like
+          servers, as well as the artifact hashes
+        * caching all of the above
+ * retrieving the metadata out of the downloaded artifacts
+ """
+
+ def __init__(self, b_working_directory, validate_certs=True):
+ # type: (bytes, bool) -> None
+ """Initialize ConcreteArtifactsManager caches and costraints."""
+ self._validate_certs = validate_certs # type: bool
+ self._artifact_cache = {} # type: Dict[bytes, bytes]
+ self._galaxy_artifact_cache = {} # type: Dict[Union[Candidate, Requirement], bytes]
+ self._artifact_meta_cache = {} # type: Dict[bytes, Dict[str, Optional[Union[str, List[str], Dict[str, str]]]]]
+ self._galaxy_collection_cache = {} # type: Dict[Union[Candidate, Requirement], Tuple[str, str, GalaxyToken]]
+ self._b_working_directory = b_working_directory # type: bytes
+
+ def get_galaxy_artifact_path(self, collection):
+ # type: (Union[Candidate, Requirement]) -> bytes
+ """Given a Galaxy-stored collection, return a cached path.
+
+ If it's not yet on disk, this method downloads the artifact first.
+ """
+ try:
+ return self._galaxy_artifact_cache[collection]
+ except KeyError:
+ pass
+
+ try:
+ url, sha256_hash, token = self._galaxy_collection_cache[collection]
+ except KeyError as key_err:
+ raise_from(
+ RuntimeError(
+                    'There is no known source for {coll!s}'.
+ format(coll=collection),
+ ),
+ key_err,
+ )
+
+ display.vvvv(
+ "Fetching a collection tarball for '{collection!s}' from "
+ 'Ansible Galaxy'.format(collection=collection),
+ )
+
+ try:
+ b_artifact_path = _download_file(
+ url,
+ self._b_working_directory,
+ expected_hash=sha256_hash,
+ validate_certs=self._validate_certs,
+ token=token,
+ ) # type: bytes
+ except URLError as err:
+ raise_from(
+ AnsibleError(
+ 'Failed to download collection tar '
+ "from '{coll_src!s}': {download_err!s}".
+ format(
+ coll_src=to_native(collection.src),
+ download_err=to_native(err),
+ ),
+ ),
+ err,
+ )
+ else:
+ display.vvv(
+ "Collection '{coll!s}' obtained from "
+ 'server {server!s} {url!s}'.format(
+ coll=collection, server=collection.src or 'Galaxy',
+ url=collection.src.api_server if collection.src is not None
+ else '',
+ )
+ )
+
+ self._galaxy_artifact_cache[collection] = b_artifact_path
+ return b_artifact_path
+
+ def get_artifact_path(self, collection):
+ # type: (Union[Candidate, Requirement]) -> bytes
+ """Given a concrete collection pointer, return a cached path.
+
+ If it's not yet on disk, this method downloads the artifact first.
+ """
+ try:
+ return self._artifact_cache[collection.src]
+ except KeyError:
+ pass
+
+ # NOTE: SCM needs to be special-cased as it may contain either
+ # NOTE: one collection in its root, or a number of top-level
+ # NOTE: collection directories instead.
+ # NOTE: The idea is to store the SCM collection as unpacked
+ # NOTE: directory structure under the temporary location and use
+ # NOTE: a "virtual" collection that has pinned requirements on
+ # NOTE: the directories under that SCM checkout that correspond
+ # NOTE: to collections.
+ # NOTE: This brings us to the idea that we need two separate
+ # NOTE: virtual Requirement/Candidate types --
+ # NOTE: (single) dir + (multidir) subdirs
+ if collection.is_url:
+ display.vvvv(
+ "Collection requirement '{collection!s}' is a URL "
+ 'to a tar artifact'.format(collection=collection.fqcn),
+ )
+ try:
+ b_artifact_path = _download_file(
+ collection.src,
+ self._b_working_directory,
+ expected_hash=None, # NOTE: URLs don't support checksums
+ validate_certs=self._validate_certs,
+ )
+ except URLError as err:
+ raise_from(
+ AnsibleError(
+ 'Failed to download collection tar '
+ "from '{coll_src!s}': {download_err!s}".
+ format(
+ coll_src=to_native(collection.src),
+ download_err=to_native(err),
+ ),
+ ),
+ err,
+ )
+ elif collection.is_scm:
+ b_artifact_path = _extract_collection_from_git(
+ collection.src,
+ collection.ver,
+ self._b_working_directory,
+ )
+ elif collection.is_file or collection.is_dir or collection.is_subdirs:
+ b_artifact_path = to_bytes(collection.src)
+ else:
+ # NOTE: This may happen `if collection.is_online_index_pointer`
+ raise RuntimeError(
+ 'The artifact is of an unexpected type {art_type!s}'.
+ format(art_type=collection.type)
+ )
+
+ self._artifact_cache[collection.src] = b_artifact_path
+ return b_artifact_path
+
+ def _get_direct_collection_namespace(self, collection):
+ # type: (Candidate) -> Optional[str]
+ return self.get_direct_collection_meta(collection)['namespace'] # type: ignore[return-value]
+
+ def _get_direct_collection_name(self, collection):
+ # type: (Candidate) -> Optional[str]
+ return self.get_direct_collection_meta(collection)['name'] # type: ignore[return-value]
+
+ def get_direct_collection_fqcn(self, collection):
+ # type: (Candidate) -> Optional[str]
+ """Extract FQCN from the given on-disk collection artifact.
+
+ If the collection is virtual, ``None`` is returned instead
+ of a string.
+ """
+ if collection.is_virtual:
+ # NOTE: should it be something like "<virtual>"?
+ return None
+
+ return '.'.join(( # type: ignore[type-var]
+ self._get_direct_collection_namespace(collection), # type: ignore[arg-type]
+ self._get_direct_collection_name(collection),
+ ))
+
+ def get_direct_collection_version(self, collection):
+ # type: (Union[Candidate, Requirement]) -> str
+ """Extract version from the given on-disk collection artifact."""
+ return self.get_direct_collection_meta(collection)['version'] # type: ignore[return-value]
+
+ def get_direct_collection_dependencies(self, collection):
+ # type: (Union[Candidate, Requirement]) -> Dict[str, str]
+ """Extract deps from the given on-disk collection artifact."""
+ return self.get_direct_collection_meta(collection)['dependencies'] # type: ignore[return-value]
+
+ def get_direct_collection_meta(self, collection):
+ # type: (Union[Candidate, Requirement]) -> Dict[str, Optional[Union[str, Dict[str, str], List[str]]]]
+ """Extract meta from the given on-disk collection artifact."""
+ try: # FIXME: use unique collection identifier as a cache key?
+ return self._artifact_meta_cache[collection.src]
+ except KeyError:
+ b_artifact_path = self.get_artifact_path(collection)
+
+ if collection.is_url or collection.is_file:
+ collection_meta = _get_meta_from_tar(b_artifact_path)
+ elif collection.is_dir: # should we just build a coll instead?
+ # FIXME: what if there's subdirs?
+ try:
+ collection_meta = _get_meta_from_dir(b_artifact_path)
+ except LookupError as lookup_err:
+ raise_from(
+ AnsibleError(
+ 'Failed to find the collection dir deps: {err!s}'.
+ format(err=to_native(lookup_err)),
+ ),
+ lookup_err,
+ )
+ elif collection.is_scm:
+ collection_meta = {
+ 'name': None,
+ 'namespace': None,
+ 'dependencies': {to_native(b_artifact_path): '*'},
+ 'version': '*',
+ }
+ elif collection.is_subdirs:
+ collection_meta = {
+ 'name': None,
+ 'namespace': None,
+ # NOTE: Dropping b_artifact_path since it's based on src anyway
+ 'dependencies': dict.fromkeys(
+ map(to_native, collection.namespace_collection_paths),
+ '*',
+ ),
+ 'version': '*',
+ }
+ else:
+ raise RuntimeError
+
+ self._artifact_meta_cache[collection.src] = collection_meta
+ return collection_meta
+
+ def save_collection_source(self, collection, url, sha256_hash, token):
+ # type: (Candidate, str, str, GalaxyToken) -> None
+ """Store collection URL, SHA256 hash and Galaxy API token.
+
+ This is a hook that is supposed to be called before attempting to
+ download Galaxy-based collections with ``get_galaxy_artifact_path()``.
+ """
+ self._galaxy_collection_cache[collection] = url, sha256_hash, token
+
+ @classmethod
+ @contextmanager
+ def under_tmpdir(
+ cls, # type: Type[ConcreteArtifactsManager]
+ temp_dir_base, # type: str
+ validate_certs=True, # type: bool
+ ): # type: (...) -> Iterator[ConcreteArtifactsManager]
+ """Custom ConcreteArtifactsManager constructor with temp dir.
+
+ This method returns a context manager that allocates and cleans
+ up a temporary directory for caching the collection artifacts
+ during the dependency resolution process.
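+
+ An illustrative sketch (``some_requirement`` is a placeholder for any
+ ``Requirement``/``Candidate`` instance handled by this manager)::
+
+     with ConcreteArtifactsManager.under_tmpdir('/tmp') as art_mgr:
+         b_path = art_mgr.get_artifact_path(some_requirement)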
+ """
+ # NOTE: Can't use `with tempfile.TemporaryDirectory:`
+ # NOTE: because it's not in Python 2 stdlib.
+ temp_path = mkdtemp(
+ dir=to_bytes(temp_dir_base, errors='surrogate_or_strict'),
+ )
+ b_temp_path = to_bytes(temp_path, errors='surrogate_or_strict')
+ try:
+ yield cls(b_temp_path, validate_certs)
+ finally:
+ rmtree(b_temp_path)
+
+
+def parse_scm(collection, version):
+ """Extract name, version, path and subdir out of the SCM pointer."""
+ if ',' in collection:
+ collection, version = collection.split(',', 1)
+ elif version == '*' or not version:
+ version = 'HEAD'
+
+ if collection.startswith('git+'):
+ path = collection[4:]
+ else:
+ path = collection
+
+ path, fragment = urldefrag(path)
+ fragment = fragment.strip(os.path.sep)
+
+ if path.endswith(os.path.sep + '.git'):
+ name = path.split(os.path.sep)[-2]
+ elif '://' not in path and '@' not in path:
+ name = path
+ else:
+ name = path.split('/')[-1]
+ if name.endswith('.git'):
+ name = name[:-4]
+
+ return name, version, path, fragment
+
+
+def _extract_collection_from_git(repo_url, coll_ver, b_path):
+ name, version, git_url, fragment = parse_scm(repo_url, coll_ver)
+ b_checkout_path = mkdtemp(
+ dir=b_path,
+ prefix=to_bytes(name, errors='surrogate_or_strict'),
+ ) # type: bytes
+ git_clone_cmd = 'git', 'clone', git_url, to_text(b_checkout_path)
+ # FIXME: '--depth', '1', '--branch', version
+ try:
+ subprocess.check_call(git_clone_cmd)
+ except subprocess.CalledProcessError as proc_err:
+ raise_from(
+ AnsibleError( # should probably be LookupError
+ 'Failed to clone a Git repository from `{repo_url!s}`.'.
+ format(repo_url=to_native(git_url)),
+ ),
+ proc_err,
+ )
+
+ git_switch_cmd = 'git', 'checkout', to_text(version)
+ try:
+ subprocess.check_call(git_switch_cmd, cwd=b_checkout_path)
+ except subprocess.CalledProcessError as proc_err:
+ raise_from(
+ AnsibleError( # should probably be LookupError
+ 'Failed to switch a cloned Git repo `{repo_url!s}` '
+ 'to the requested revision `{commitish!s}`.'.
+ format(
+ commitish=to_native(version),
+ repo_url=to_native(git_url),
+ ),
+ ),
+ proc_err,
+ )
+
+ return (
+ os.path.join(b_checkout_path, to_bytes(fragment))
+ if fragment else b_checkout_path
+ )
+
+
+# FIXME: use random subdirs while preserving the file names
+def _download_file(url, b_path, expected_hash, validate_certs, token=None):
+ # type: (str, bytes, Optional[str], bool, GalaxyToken) -> bytes
+ # ^ NOTE: used in download and verify_collections ^
+ b_tarball_name = to_bytes(
+ url.rsplit('/', 1)[1], errors='surrogate_or_strict',
+ )
+ b_file_name = b_tarball_name[:-len('.tar.gz')]
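+ # e.g. a URL ending in 'ns-coll-1.2.3.tar.gz' yields a tarball name of
+ # b'ns-coll-1.2.3.tar.gz' and a temp dir prefixed with b'ns-coll-1.2.3-'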
+
+ b_tarball_dir = mkdtemp(
+ dir=b_path,
+ prefix=b'-'.join((b_file_name, b'')),
+ ) # type: bytes
+
+ b_file_path = os.path.join(b_tarball_dir, b_tarball_name)
+
+ display.display("Downloading %s to %s" % (url, to_text(b_tarball_dir)))
+ # NOTE: Galaxy redirects downloads to S3 which rejects the request
+ # NOTE: if an Authorization header is attached so don't redirect it
+ resp = open_url(
+ to_native(url, errors='surrogate_or_strict'),
+ validate_certs=validate_certs,
+ headers=None if token is None else token.headers(),
+ unredirected_headers=['Authorization'], http_agent=user_agent(),
+ )
+
+ with open(b_file_path, 'wb') as download_file: # type: BinaryIO
+ actual_hash = _consume_file(resp, write_to=download_file)
+
+ if expected_hash:
+ display.vvvv(
+ 'Validating downloaded file hash {actual_hash!s} with '
+ 'expected hash {expected_hash!s}'.
+ format(actual_hash=actual_hash, expected_hash=expected_hash)
+ )
+ if expected_hash != actual_hash:
+ raise AnsibleError('The downloaded artifact hash does not match the expected hash')
+
+ return b_file_path
+
+
+def _consume_file(read_from, write_to=None):
+ # type: (BinaryIO, BinaryIO) -> str
+ bufsize = 65536
+ sha256_digest = sha256()
+ data = read_from.read(bufsize)
+ while data:
+ if write_to is not None:
+ write_to.write(data)
+ write_to.flush()
+ sha256_digest.update(data)
+ data = read_from.read(bufsize)
+
+ return sha256_digest.hexdigest()
+
+
+def _normalize_galaxy_yml_manifest(
+ galaxy_yml, # type: Dict[str, Optional[Union[str, List[str], Dict[str, str]]]]
+ b_galaxy_yml_path, # type: bytes
+):
+ # type: (...) -> Dict[str, Optional[Union[str, List[str], Dict[str, str]]]]
+ galaxy_yml_schema = (
+ get_collections_galaxy_meta_info()
+ ) # type: List[Dict[str, Any]] # FIXME: <--
+ # FIXME: 👆maybe precise type: List[Dict[str, Union[bool, str, List[str]]]]
+
+ mandatory_keys = set()
+ string_keys = set() # type: Set[str]
+ list_keys = set() # type: Set[str]
+ dict_keys = set() # type: Set[str]
+
+ for info in galaxy_yml_schema:
+ if info.get('required', False):
+ mandatory_keys.add(info['key'])
+
+ key_list_type = {
+ 'str': string_keys,
+ 'list': list_keys,
+ 'dict': dict_keys,
+ }[info.get('type', 'str')]
+ key_list_type.add(info['key'])
+
+ all_keys = frozenset(list(mandatory_keys) + list(string_keys) + list(list_keys) + list(dict_keys))
+
+ set_keys = set(galaxy_yml.keys())
+ missing_keys = mandatory_keys.difference(set_keys)
+ if missing_keys:
+ raise AnsibleError("The collection galaxy.yml at '%s' is missing the following mandatory keys: %s"
+ % (to_native(b_galaxy_yml_path), ", ".join(sorted(missing_keys))))
+
+ extra_keys = set_keys.difference(all_keys)
+ if len(extra_keys) > 0:
+ display.warning("Found unknown keys in collection galaxy.yml at '%s': %s"
+ % (to_text(b_galaxy_yml_path), ", ".join(extra_keys)))
+
+ # Add the defaults if they have not been set
+ for optional_string in string_keys:
+ if optional_string not in galaxy_yml:
+ galaxy_yml[optional_string] = None
+
+ for optional_list in list_keys:
+ list_val = galaxy_yml.get(optional_list, None)
+
+ if list_val is None:
+ galaxy_yml[optional_list] = []
+ elif not isinstance(list_val, list):
+ galaxy_yml[optional_list] = [list_val] # type: ignore[list-item]
+
+ for optional_dict in dict_keys:
+ if optional_dict not in galaxy_yml:
+ galaxy_yml[optional_dict] = {}
+
+ # NOTE: `version: null` is only allowed for `galaxy.yml`
+ # NOTE: and not `MANIFEST.json`. The use-case for it is collections
+ # NOTE: that generate the version from Git before building a
+ # NOTE: distributable tarball artifact.
+ if not galaxy_yml.get('version'):
+ galaxy_yml['version'] = '*'
+
+ return galaxy_yml
+
+
+def _get_meta_from_dir(
+ b_path, # type: bytes
+): # type: (...) -> Dict[str, Optional[Union[str, List[str], Dict[str, str]]]]
+ try:
+ return _get_meta_from_installed_dir(b_path)
+ except LookupError:
+ return _get_meta_from_src_dir(b_path)
+
+
+def _get_meta_from_src_dir(
+ b_path, # type: bytes
+): # type: (...) -> Dict[str, Optional[Union[str, List[str], Dict[str, str]]]]
+ galaxy_yml = os.path.join(b_path, _GALAXY_YAML)
+ if not os.path.isfile(galaxy_yml):
+ raise LookupError(
+ "The collection galaxy.yml path '{path!s}' does not exist.".
+ format(path=to_native(galaxy_yml))
+ )
+
+ with open(galaxy_yml, 'rb') as manifest_file_obj:
+ try:
+ manifest = yaml.safe_load(manifest_file_obj)
+ except yaml.error.YAMLError as yaml_err:
+ raise_from(
+ AnsibleError(
+ "Failed to parse the galaxy.yml at '{path!s}' with "
+ 'the following error:\n{err_txt!s}'.
+ format(
+ path=to_native(galaxy_yml),
+ err_txt=to_native(yaml_err),
+ ),
+ ),
+ yaml_err,
+ )
+
+ return _normalize_galaxy_yml_manifest(manifest, galaxy_yml)
+
+
+def _get_json_from_installed_dir(
+ b_path, # type: bytes
+ filename, # type: str
+): # type: (...) -> Dict
+
+ b_json_filepath = os.path.join(b_path, to_bytes(filename, errors='surrogate_or_strict'))
+
+ try:
+ with open(b_json_filepath, 'rb') as manifest_fd:
+ b_json_text = manifest_fd.read()
+ except (IOError, OSError):
+ raise LookupError(
+ "The collection {manifest!s} path '{path!s}' does not exist.".
+ format(
+ manifest=filename,
+ path=to_native(b_json_filepath),
+ )
+ )
+
+ manifest_txt = to_text(b_json_text, errors='surrogate_or_strict')
+
+ try:
+ manifest = json.loads(manifest_txt)
+ except ValueError:
+ raise AnsibleError(
+ 'Collection metadata file {member!s} does not '
+ 'contain a valid json string.'.
+ format(member=filename),
+ )
+
+ return manifest
+
+
+def _get_meta_from_installed_dir(
+ b_path, # type: bytes
+): # type: (...) -> Dict[str, Optional[Union[str, List[str], Dict[str, str]]]]
+ manifest = _get_json_from_installed_dir(b_path, MANIFEST_FILENAME)
+ collection_info = manifest['collection_info']
+
+ version = collection_info.get('version')
+ if not version:
+ raise AnsibleError(
+ u'Collection metadata file `{manifest_filename!s}` at `{meta_file!s}` is expected '
+ u'to have a valid SemVer version value but got {version!s}'.
+ format(
+ manifest_filename=MANIFEST_FILENAME,
+ meta_file=to_text(b_path),
+ version=to_text(repr(version)),
+ ),
+ )
+
+ return collection_info
+
+
+def _get_meta_from_tar(
+ b_path, # type: bytes
+): # type: (...) -> Dict[str, Optional[Union[str, List[str], Dict[str, str]]]]
+ if not tarfile.is_tarfile(b_path):
+ raise AnsibleError(
+ "Collection artifact at '{path!s}' is not a valid tar file.".
+ format(path=to_native(b_path)),
+ )
+
+ with tarfile.open(b_path, mode='r') as collection_tar: # type: tarfile.TarFile
+ try:
+ member = collection_tar.getmember(MANIFEST_FILENAME)
+ except KeyError:
+ raise AnsibleError(
+ "Collection at '{path!s}' does not contain the "
+ 'required file {manifest_file!s}.'.
+ format(
+ path=to_native(b_path),
+ manifest_file=MANIFEST_FILENAME,
+ ),
+ )
+
+ with _tarfile_extract(collection_tar, member) as (_member, member_obj):
+ if member_obj is None:
+ raise AnsibleError(
+ 'Collection tar file does not contain '
+ 'member {member!s}'.format(member=MANIFEST_FILENAME),
+ )
+
+ text_content = to_text(
+ member_obj.read(),
+ errors='surrogate_or_strict',
+ )
+
+ try:
+ manifest = json.loads(text_content)
+ except ValueError:
+ raise AnsibleError(
+ 'Collection tar file member {member!s} does not '
+ 'contain a valid json string.'.
+ format(member=MANIFEST_FILENAME),
+ )
+ return manifest['collection_info']
+
+
+@contextmanager
+def _tarfile_extract(
+ tar, # type: tarfile.TarFile
+ member, # type: tarfile.TarInfo
+):
+ # type: (...) -> Iterator[Tuple[tarfile.TarInfo, Optional[IO[bytes]]]]
+ tar_obj = tar.extractfile(member)
+ try:
+ yield member, tar_obj
+ finally:
+ if tar_obj is not None:
+ tar_obj.close()
diff --git a/lib/ansible/galaxy/collection/galaxy_api_proxy.py b/lib/ansible/galaxy/collection/galaxy_api_proxy.py
new file mode 100644
index 00000000..fb4cd5de
--- /dev/null
+++ b/lib/ansible/galaxy/collection/galaxy_api_proxy.py
@@ -0,0 +1,107 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020-2021, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""A facade for interfacing with multiple Galaxy instances."""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+try:
+ from typing import TYPE_CHECKING
+except ImportError:
+ TYPE_CHECKING = False
+
+if TYPE_CHECKING:
+ from typing import Dict, Iterable, Tuple
+ from ansible.galaxy.api import CollectionVersionMetadata
+ from ansible.galaxy.collection.concrete_artifact_manager import (
+ ConcreteArtifactsManager,
+ )
+ from ansible.galaxy.dependency_resolution.dataclasses import (
+ Candidate, Requirement,
+ )
+
+from ansible.galaxy.api import GalaxyAPI, GalaxyError
+
+
+class MultiGalaxyAPIProxy:
+ """A proxy that abstracts talking to multiple Galaxy instances."""
+
+ def __init__(self, apis, concrete_artifacts_manager):
+ # type: (Iterable[GalaxyAPI], ConcreteArtifactsManager) -> None
+ """Initialize the target APIs list."""
+ self._apis = apis
+ self._concrete_art_mgr = concrete_artifacts_manager
+
+ def get_collection_versions(self, requirement):
+ # type: (Requirement) -> Iterable[Tuple[str, GalaxyAPI]]
+ """Get a set of unique versions for FQCN on Galaxy servers."""
+ if requirement.is_concrete_artifact:
+ return {
+ (
+ self._concrete_art_mgr.
+ get_direct_collection_version(requirement),
+ requirement.src,
+ ),
+ }
+
+ api_lookup_order = (
+ (requirement.src, )
+ if isinstance(requirement.src, GalaxyAPI)
+ else self._apis
+ )
+ return set(
+ (version, api)
+ for api in api_lookup_order
+ for version in api.get_collection_versions(
+ requirement.namespace, requirement.name,
+ )
+ )
+
+ def get_collection_version_metadata(self, collection_candidate):
+ # type: (Candidate) -> CollectionVersionMetadata
+ """Retrieve collection metadata of a given candidate."""
+
+ api_lookup_order = (
+ (collection_candidate.src, )
+ if isinstance(collection_candidate.src, GalaxyAPI)
+ else self._apis
+ )
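+ # Try each matching API in order; remember the most recent error and
+ # re-raise it only if every server fails to return the metadata.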
+ for api in api_lookup_order:
+ try:
+ version_metadata = api.get_collection_version_metadata(
+ collection_candidate.namespace,
+ collection_candidate.name,
+ collection_candidate.ver,
+ )
+ except GalaxyError as api_err:
+ last_err = api_err
+ else:
+ self._concrete_art_mgr.save_collection_source(
+ collection_candidate,
+ version_metadata.download_url,
+ version_metadata.artifact_sha256,
+ api.token,
+ )
+ return version_metadata
+
+ raise last_err
+
+ def get_collection_dependencies(self, collection_candidate):
+ # type: (Candidate) -> Dict[str, str]
+ # FIXME: return Requirement instances instead?
+ """Retrieve collection dependencies of a given candidate."""
+ if collection_candidate.is_concrete_artifact:
+ return (
+ self.
+ _concrete_art_mgr.
+ get_direct_collection_dependencies
+ )(collection_candidate)
+
+ return (
+ self.
+ get_collection_version_metadata(collection_candidate).
+ dependencies
+ )
diff --git a/lib/ansible/galaxy/dependency_resolution/__init__.py b/lib/ansible/galaxy/dependency_resolution/__init__.py
new file mode 100644
index 00000000..5698d2b7
--- /dev/null
+++ b/lib/ansible/galaxy/dependency_resolution/__init__.py
@@ -0,0 +1,56 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020-2021, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""Dependency resolution machinery."""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+try:
+ from typing import TYPE_CHECKING
+except ImportError:
+ TYPE_CHECKING = False
+
+if TYPE_CHECKING:
+ from typing import Iterable
+ from ansible.galaxy.api import GalaxyAPI
+ from ansible.galaxy.collection.concrete_artifact_manager import (
+ ConcreteArtifactsManager,
+ )
+ from ansible.galaxy.dependency_resolution.dataclasses import (
+ Candidate,
+ Requirement,
+ )
+
+from ansible.galaxy.collection.galaxy_api_proxy import MultiGalaxyAPIProxy
+from ansible.galaxy.dependency_resolution.providers import CollectionDependencyProvider
+from ansible.galaxy.dependency_resolution.reporters import CollectionDependencyReporter
+from ansible.galaxy.dependency_resolution.resolvers import CollectionDependencyResolver
+
+
+def build_collection_dependency_resolver(
+ galaxy_apis, # type: Iterable[GalaxyAPI]
+ concrete_artifacts_manager, # type: ConcreteArtifactsManager
+ user_requirements, # type: Iterable[Requirement]
+ preferred_candidates=None, # type: Iterable[Candidate]
+ with_deps=True, # type: bool
+ with_pre_releases=False, # type: bool
+ upgrade=False, # type: bool
+): # type: (...) -> CollectionDependencyResolver
+ """Return a collection dependency resolver.
+
+ The returned instance will have a ``resolve()`` method for
+ further consumption.
+ """
+ return CollectionDependencyResolver(
+ CollectionDependencyProvider(
+ apis=MultiGalaxyAPIProxy(galaxy_apis, concrete_artifacts_manager),
+ concrete_artifacts_manager=concrete_artifacts_manager,
+ user_requirements=user_requirements,
+ preferred_candidates=preferred_candidates,
+ with_deps=with_deps,
+ with_pre_releases=with_pre_releases,
+ upgrade=upgrade,
+ ),
+ CollectionDependencyReporter(),
+ )
diff --git a/lib/ansible/galaxy/dependency_resolution/dataclasses.py b/lib/ansible/galaxy/dependency_resolution/dataclasses.py
new file mode 100644
index 00000000..49de8c5f
--- /dev/null
+++ b/lib/ansible/galaxy/dependency_resolution/dataclasses.py
@@ -0,0 +1,427 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020-2021, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""Dependency structs."""
+# FIXME: add caching all over the place
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+from collections import namedtuple
+from glob import iglob
+
+try:
+ from typing import TYPE_CHECKING
+except ImportError:
+ TYPE_CHECKING = False
+
+if TYPE_CHECKING:
+ from typing import Tuple, Type, TypeVar
+ from ansible.galaxy.collection.concrete_artifact_manager import (
+ ConcreteArtifactsManager,
+ )
+ Collection = TypeVar(
+ 'Collection',
+ 'Candidate', 'Requirement',
+ '_ComputedReqKindsMixin',
+ )
+
+import yaml
+
+from ansible.errors import AnsibleError
+from ansible.galaxy.api import GalaxyAPI
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+from ansible.module_utils.six import raise_from
+from ansible.utils.collection_loader import AnsibleCollectionRef
+from ansible.utils.display import Display
+
+
+_ALLOW_CONCRETE_POINTER_IN_SOURCE = False # NOTE: This is a feature flag
+_GALAXY_YAML = b'galaxy.yml'
+_MANIFEST_JSON = b'MANIFEST.json'
+
+
+display = Display()
+
+
+def _is_collection_src_dir(dir_path):
+ b_dir_path = to_bytes(dir_path, errors='surrogate_or_strict')
+ return os.path.isfile(os.path.join(b_dir_path, _GALAXY_YAML))
+
+
+def _is_installed_collection_dir(dir_path):
+ b_dir_path = to_bytes(dir_path, errors='surrogate_or_strict')
+ return os.path.isfile(os.path.join(b_dir_path, _MANIFEST_JSON))
+
+
+def _is_collection_dir(dir_path):
+ return (
+ _is_installed_collection_dir(dir_path) or
+ _is_collection_src_dir(dir_path)
+ )
+
+
+def _find_collections_in_subdirs(dir_path):
+ b_dir_path = to_bytes(dir_path, errors='surrogate_or_strict')
+
+ subdir_glob_pattern = os.path.join(
+ b_dir_path,
+ # b'*', # namespace is supposed to be top-level per spec
+ b'*', # collection name
+ )
+
+ for subdir in iglob(subdir_glob_pattern):
+ if os.path.isfile(os.path.join(subdir, _MANIFEST_JSON)):
+ yield subdir
+ elif os.path.isfile(os.path.join(subdir, _GALAXY_YAML)):
+ yield subdir
+
+
+def _is_collection_namespace_dir(tested_str):
+ return any(_find_collections_in_subdirs(tested_str))
+
+
+def _is_file_path(tested_str):
+ return os.path.isfile(to_bytes(tested_str, errors='surrogate_or_strict'))
+
+
+def _is_http_url(tested_str):
+ return urlparse(tested_str).scheme.lower() in {'http', 'https'}
+
+
+def _is_git_url(tested_str):
+ return tested_str.startswith(('git+', 'git@'))
+
+
+def _is_concrete_artifact_pointer(tested_str):
+ return any(
+ predicate(tested_str)
+ for predicate in (
+ # NOTE: Maintain the checks to be sorted from light to heavy:
+ _is_git_url,
+ _is_http_url,
+ _is_file_path,
+ _is_collection_dir,
+ _is_collection_namespace_dir,
+ )
+ )
+
+
+class _ComputedReqKindsMixin:
+
+ @classmethod
+ def from_dir_path_as_unknown( # type: ignore[misc]
+ cls, # type: Type[Collection]
+ dir_path, # type: bytes
+ art_mgr, # type: ConcreteArtifactsManager
+ ): # type: (...) -> Collection
+ """Make collection from an unspecified dir type.
+
+ This alternative constructor attempts to grab metadata from the
+ given path if it's a directory. If there's no metadata, it
+ falls back to guessing the FQCN based on the directory path and
+ sets the version to "*".
+
+ It raises a ValueError immediately if the input is not an
+ existing directory path.
+ """
+ if not os.path.isdir(dir_path):
+ raise ValueError(
+ "The collection directory '{path!s}' doesn't exist".
+ format(path=to_native(dir_path)),
+ )
+
+ try:
+ return cls.from_dir_path(dir_path, art_mgr)
+ except ValueError:
+ return cls.from_dir_path_implicit(dir_path)
+
+ @classmethod
+ def from_dir_path(cls, dir_path, art_mgr):
+ """Make collection from an directory with metadata."""
+ b_dir_path = to_bytes(dir_path, errors='surrogate_or_strict')
+ if not _is_collection_dir(b_dir_path):
+ display.warning(
+ u"Collection at '{path!s}' does not have a {manifest_json!s} "
+ u'file, nor has it {galaxy_yml!s}: cannot detect version.'.
+ format(
+ galaxy_yml=to_text(_GALAXY_YAML),
+ manifest_json=to_text(_MANIFEST_JSON),
+ path=to_text(dir_path, errors='surrogate_or_strict'),
+ ),
+ )
+ raise ValueError(
+ '`dir_path` argument must be an installed or a source'
+ ' collection directory.',
+ )
+
+ tmp_inst_req = cls(None, None, dir_path, 'dir')
+ req_name = art_mgr.get_direct_collection_fqcn(tmp_inst_req)
+ req_version = art_mgr.get_direct_collection_version(tmp_inst_req)
+
+ return cls(req_name, req_version, dir_path, 'dir')
+
+ @classmethod
+ def from_dir_path_implicit( # type: ignore[misc]
+ cls, # type: Type[Collection]
+ dir_path, # type: bytes
+ ): # type: (...) -> Collection
+ """Construct a collection instance based on an arbitrary dir.
+
+ This alternative constructor infers the FQCN based on the parent
+ and current directory names. It also sets the version to "*"
+ regardless of whether any of the known metadata files are present.
+ """
+ # There is no metadata, but it isn't required for a functional collection. Determine the namespace.name from the path.
+ u_dir_path = to_text(dir_path, errors='surrogate_or_strict')
+ path_list = u_dir_path.split(os.path.sep)
+ req_name = '.'.join(path_list[-2:])
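+ # e.g. '/colls/ansible_collections/ns/coll' is inferred as FQCN 'ns.coll'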
+ return cls(req_name, '*', dir_path, 'dir') # type: ignore[call-arg]
+
+ @classmethod
+ def from_string(cls, collection_input, artifacts_manager):
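+ # e.g. 'ns.coll:>=1.0.0' splits on the first ':' into a name and a
+ # version spec, while paths, URLs and SCM pointers are kept whole in 'name'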
+ req = {}
+ if _is_concrete_artifact_pointer(collection_input):
+ # Arg is a file path or URL to a collection
+ req['name'] = collection_input
+ else:
+ req['name'], _sep, req['version'] = collection_input.partition(':')
+ if not req['version']:
+ del req['version']
+
+ return cls.from_requirement_dict(req, artifacts_manager)
+
+ @classmethod
+ def from_requirement_dict(cls, collection_req, art_mgr):
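+ # Typical requirements.yml entries (illustrative): {'name': 'ns.coll',
+ # 'version': '>=1.0.0'} or {'name': 'git+https://github.com/org/repo.git',
+ # 'type': 'git', 'version': 'main'}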
+ req_name = collection_req.get('name', None)
+ req_version = collection_req.get('version', '*')
+ req_type = collection_req.get('type')
+ # TODO: decide how to deprecate the old src API behavior
+ req_source = collection_req.get('source', None)
+
+ if req_type is None:
+ if ( # FIXME: decide on the future behavior:
+ _ALLOW_CONCRETE_POINTER_IN_SOURCE
+ and req_source is not None
+ and _is_concrete_artifact_pointer(req_source)
+ ):
+ src_path = req_source
+ elif (
+ req_name is not None
+ and AnsibleCollectionRef.is_valid_collection_name(req_name)
+ ):
+ req_type = 'galaxy'
+ elif (
+ req_name is not None
+ and _is_concrete_artifact_pointer(req_name)
+ ):
+ src_path, req_name = req_name, None
+ else:
+ dir_tip_tmpl = ( # NOTE: leading LFs are for concat
+ '\n\nTip: Make sure you are pointing to the right '
+ 'subdirectory — `{src!s}` looks like a directory '
+ 'but it is neither a collection, nor a namespace '
+ 'dir.'
+ )
+
+ if req_source is not None and os.path.isdir(req_source):
+ tip = dir_tip_tmpl.format(src=req_source)
+ elif req_name is not None and os.path.isdir(req_name):
+ tip = dir_tip_tmpl.format(src=req_name)
+ elif req_name:
+ tip = '\n\nCould not find {0}.'.format(req_name)
+ else:
+ tip = ''
+
+ raise AnsibleError( # NOTE: I'd prefer a ValueError instead
+ 'Neither the collection requirement entry key '
+ "'name', nor 'source' point to a concrete "
+ "resolvable collection artifact. Also 'name' is "
+ 'not an FQCN. A valid collection name must be in '
+ 'the format <namespace>.<collection>. Please make '
+ 'sure that the namespace and the collection name '
+ 'contain characters from [a-zA-Z0-9_] only.'
+ '{extra_tip!s}'.format(extra_tip=tip),
+ )
+
+ if req_type is None:
+ if _is_git_url(src_path):
+ req_type = 'git'
+ req_source = src_path
+ elif _is_http_url(src_path):
+ req_type = 'url'
+ req_source = src_path
+ elif _is_file_path(src_path):
+ req_type = 'file'
+ req_source = src_path
+ elif _is_collection_dir(src_path):
+ if _is_installed_collection_dir(src_path) and _is_collection_src_dir(src_path):
+ # Note that ``download`` requires a dir with a ``galaxy.yml`` and fails if it
+ # doesn't exist, but if a ``MANIFEST.json`` also exists, it would be used
+ # instead of the ``galaxy.yml``.
+ raise AnsibleError(
+ u"Collection requirement at '{path!s}' has both a {manifest_json!s} "
+ u"file and a {galaxy_yml!s}.\nThe requirement must either be an installed "
+ u"collection directory or a source collection directory, not both.".
+ format(
+ path=to_text(src_path, errors='surrogate_or_strict'),
+ manifest_json=to_text(_MANIFEST_JSON),
+ galaxy_yml=to_text(_GALAXY_YAML),
+ )
+ )
+ req_type = 'dir'
+ req_source = src_path
+ elif _is_collection_namespace_dir(src_path):
+ req_name = None # No name for a virtual req or "namespace."?
+ req_type = 'subdirs'
+ req_source = src_path
+ else:
+ raise AnsibleError( # NOTE: this is never supposed to be hit
+ 'Failed to automatically detect the collection '
+ 'requirement type.',
+ )
+
+ if req_type not in {'file', 'galaxy', 'git', 'url', 'dir', 'subdirs'}:
+ raise AnsibleError(
+ "The collection requirement entry key 'type' must be "
+ 'one of file, galaxy, git, dir, subdirs, or url.'
+ )
+
+ if req_name is None and req_type == 'galaxy':
+ raise AnsibleError(
+ 'Collections requirement entry should contain '
+ "the key 'name' if it's requested from a Galaxy-like "
+ 'index server.',
+ )
+
+ if req_type != 'galaxy' and req_source is None:
+ req_source, req_name = req_name, None
+
+ if (
+ req_type == 'galaxy' and
+ isinstance(req_source, GalaxyAPI) and
+ not _is_http_url(req_source.api_server)
+ ):
+ raise AnsibleError(
+ "Collections requirement 'source' entry should contain "
+ 'a valid Galaxy API URL but it does not: {not_url!s} '
+ 'is not an HTTP URL.'.
+ format(not_url=req_source.api_server),
+ )
+
+ tmp_inst_req = cls(req_name, req_version, req_source, req_type)
+
+ if req_type not in {'galaxy', 'subdirs'} and req_name is None:
+ req_name = art_mgr.get_direct_collection_fqcn(tmp_inst_req) # TODO: fix the cache key in artifacts manager?
+
+ if req_type not in {'galaxy', 'subdirs'} and req_version == '*':
+ req_version = art_mgr.get_direct_collection_version(tmp_inst_req)
+
+ return cls(
+ req_name, req_version,
+ req_source, req_type,
+ )
+
+ def __repr__(self):
+ return (
+ '<{self!s} of type {coll_type!r} from {src!s}>'.
+ format(self=self, coll_type=self.type, src=self.src or 'Galaxy')
+ )
+
+ def __str__(self):
+ return to_native(self.__unicode__())
+
+ def __unicode__(self):
+ if self.fqcn is None:
+ return (
+ u'"virtual collection Git repo"' if self.is_scm
+ else u'"virtual collection namespace"'
+ )
+
+ return (
+ u'{fqcn!s}:{ver!s}'.
+ format(fqcn=to_text(self.fqcn), ver=to_text(self.ver))
+ )
+
+ def _get_separate_ns_n_name(self): # FIXME: use LRU cache
+ return self.fqcn.split('.')
+
+ @property
+ def namespace(self):
+ if self.is_virtual:
+ raise TypeError('Virtual collections do not have a namespace')
+
+ return self._get_separate_ns_n_name()[0]
+
+ @property
+ def name(self):
+ if self.is_virtual:
+ raise TypeError('Virtual collections do not have a name')
+
+ return self._get_separate_ns_n_name()[-1]
+
+ @property
+ def canonical_package_id(self):
+ if not self.is_virtual:
+ return to_native(self.fqcn)
+
+ return (
+ '<virtual namespace from {src!s} of type {src_type!s}>'.
+ format(src=to_native(self.src), src_type=to_native(self.type))
+ )
+
+ @property
+ def is_virtual(self):
+ return self.is_scm or self.is_subdirs
+
+ @property
+ def is_file(self):
+ return self.type == 'file'
+
+ @property
+ def is_dir(self):
+ return self.type == 'dir'
+
+ @property
+ def namespace_collection_paths(self):
+ return [
+ to_native(path)
+ for path in _find_collections_in_subdirs(self.src)
+ ]
+
+ @property
+ def is_subdirs(self):
+ return self.type == 'subdirs'
+
+ @property
+ def is_url(self):
+ return self.type == 'url'
+
+ @property
+ def is_scm(self):
+ return self.type == 'git'
+
+ @property
+ def is_concrete_artifact(self):
+ return self.type in {'git', 'url', 'file', 'dir', 'subdirs'}
+
+ @property
+ def is_online_index_pointer(self):
+ return not self.is_concrete_artifact
+
+
+class Requirement(
+ _ComputedReqKindsMixin,
+ namedtuple('Requirement', ('fqcn', 'ver', 'src', 'type')),
+):
+ """An abstract requirement request."""
+
+
+class Candidate(
+ _ComputedReqKindsMixin,
+ namedtuple('Candidate', ('fqcn', 'ver', 'src', 'type'))
+):
+ """A concrete collection candidate with its version resolved."""
diff --git a/lib/ansible/galaxy/dependency_resolution/errors.py b/lib/ansible/galaxy/dependency_resolution/errors.py
new file mode 100644
index 00000000..f5339a4d
--- /dev/null
+++ b/lib/ansible/galaxy/dependency_resolution/errors.py
@@ -0,0 +1,12 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020-2021, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""Dependency resolution exceptions."""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from resolvelib.resolvers import (
+ ResolutionImpossible as CollectionDependencyResolutionImpossible,
+ InconsistentCandidate as CollectionDependencyInconsistentCandidate,
+)
diff --git a/lib/ansible/galaxy/dependency_resolution/providers.py b/lib/ansible/galaxy/dependency_resolution/providers.py
new file mode 100644
index 00000000..35b2ceda
--- /dev/null
+++ b/lib/ansible/galaxy/dependency_resolution/providers.py
@@ -0,0 +1,332 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020-2021, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""Requirement provider interfaces."""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import functools
+
+try:
+ from typing import TYPE_CHECKING
+except ImportError:
+ TYPE_CHECKING = False
+
+if TYPE_CHECKING:
+ from typing import Iterable, List, NamedTuple, Optional, Union
+ from ansible.galaxy.collection.concrete_artifact_manager import (
+ ConcreteArtifactsManager,
+ )
+ from ansible.galaxy.collection.galaxy_api_proxy import MultiGalaxyAPIProxy
+
+from ansible.galaxy.dependency_resolution.dataclasses import (
+ Candidate,
+ Requirement,
+)
+from ansible.galaxy.dependency_resolution.versioning import (
+ is_pre_release,
+ meets_requirements,
+)
+from ansible.utils.version import SemanticVersion
+
+from resolvelib import AbstractProvider
+
+
+class CollectionDependencyProvider(AbstractProvider):
+ """Delegate providing a requirement interface for the resolver."""
+
+ def __init__(
+ self, # type: CollectionDependencyProvider
+ apis, # type: MultiGalaxyAPIProxy
+ concrete_artifacts_manager=None, # type: ConcreteArtifactsManager
+ user_requirements=None, # type: Iterable[Requirement]
+ preferred_candidates=None, # type: Iterable[Candidate]
+ with_deps=True, # type: bool
+ with_pre_releases=False, # type: bool
+ upgrade=False, # type: bool
+ ): # type: (...) -> None
+ r"""Initialize helper attributes.
+
+ :param api: An instance of the multiple Galaxy APIs wrapper.
+
+ :param concrete_artifacts_manager: An instance of the caching \
+ concrete artifacts manager.
+
+ :param with_deps: A flag specifying whether the resolver \
+ should attempt to pull-in the deps of the \
+ requested requirements. On by default.
+
+ :param with_pre_releases: A flag specifying whether the \
+ resolver should include pre-release versions. \
+ Off by default.
+ """
+ self._api_proxy = apis
+ self._make_req_from_dict = functools.partial(
+ Requirement.from_requirement_dict,
+ art_mgr=concrete_artifacts_manager,
+ )
+ self._pinned_candidate_requests = set(
+ Candidate(req.fqcn, req.ver, req.src, req.type)
+ for req in (user_requirements or ())
+ if req.is_concrete_artifact or (
+ req.ver != '*' and
+ not req.ver.startswith(('<', '>', '!='))
+ )
+ )
+ self._preferred_candidates = set(preferred_candidates or ())
+ self._with_deps = with_deps
+ self._with_pre_releases = with_pre_releases
+ self._upgrade = upgrade
+
+ def _is_user_requested(self, candidate): # type: (Candidate) -> bool
+ """Check if the candidate is requested by the user."""
+ if candidate in self._pinned_candidate_requests:
+ return True
+
+ if candidate.is_online_index_pointer and candidate.src is not None:
+ # NOTE: Candidate is a namedtuple, it has a source server set
+ # NOTE: to a specific GalaxyAPI instance or `None`. When the
+ # NOTE: user runs
+ # NOTE:
+ # NOTE: $ ansible-galaxy collection install ns.coll
+ # NOTE:
+ # NOTE: then it's saved in `self._pinned_candidate_requests`
+ # NOTE: as `('ns.coll', '*', None, 'galaxy')` but then
+ # NOTE: `self.find_matches()` calls `self.is_satisfied_by()`
+ # NOTE: with Candidate instances bound to each specific
+ # NOTE: server available, those look like
+ # NOTE: `('ns.coll', '*', GalaxyAPI(...), 'galaxy')` and
+ # NOTE: wouldn't match the user requests saved in
+ # NOTE: `self._pinned_candidate_requests`. This is why we
+ # NOTE: normalize the collection to have `src=None` and try
+ # NOTE: again.
+ # NOTE:
+ # NOTE: When the user request comes from `requirements.yml`
+ # NOTE: with the `source:` set, it'll match the first check
+ # NOTE: but it still can have entries with `src=None` so this
+ # NOTE: normalized check is still necessary.
+ return Candidate(
+ candidate.fqcn, candidate.ver, None, candidate.type,
+ ) in self._pinned_candidate_requests
+
+ return False
+
+ def identify(self, requirement_or_candidate):
+ # type: (Union[Candidate, Requirement]) -> str
+ """Given requirement or candidate, return an identifier for it.
+
+ This is used to identify a requirement or candidate, e.g.
+ whether two requirements should have their specifier parts
+ (version ranges or pins) merged, whether two candidates would
+ conflict with each other (because they have same name but
+ different versions).
+ """
+ return requirement_or_candidate.canonical_package_id
+
+ def get_preference(
+ self, # type: CollectionDependencyProvider
+ resolution, # type: Optional[Candidate]
+ candidates, # type: List[Candidate]
+ information, # type: List[NamedTuple]
+ ): # type: (...) -> Union[float, int]
+ """Return sort key function return value for given requirement.
+
+ This result should be based on preference that is defined as
+ "I think this requirement should be resolved first".
+ The lower the return value is, the more preferred this
+ group of arguments is.
+
+ :param resolution: Currently pinned candidate, or ``None``.
+
+ :param candidates: A list of possible candidates.
+
+ :param information: A list of requirement information.
+
+ Each ``information`` instance is a named tuple with two entries:
+
+ * ``requirement`` specifies a requirement contributing to
+ the current candidate list
+
+ * ``parent`` specifies the candidate that provides
+ (depends on) the requirement, or `None`
+ to indicate a root requirement.
+
+ The preference could depend on various issues, including
+ (not necessarily in this order):
+
+ * Is this package pinned in the current resolution result?
+
+ * How relaxed is the requirement? Stricter ones should
+ probably be worked on first? (I don't know, actually.)
+
+ * How many possibilities are there to satisfy this
+ requirement? Those with few left should likely be worked on
+ first, I guess?
+
+ * Are there any known conflicts for this requirement?
+ We should probably work on those with the most
+ known conflicts.
+
+ A sortable value should be returned (this will be used as the
+ `key` parameter of the built-in sorting function). The smaller
+ the value is, the more preferred this requirement is (i.e. the
+ sorting function is called with ``reverse=False``).
+ """
+ if any(
+ candidate in self._preferred_candidates
+ for candidate in candidates
+ ):
+ # NOTE: Prefer pre-installed candidates over newer versions
+ # NOTE: available from Galaxy or other sources.
+ return float('-inf')
+ return len(candidates)
+
+ def find_matches(self, requirements):
+ # type: (List[Requirement]) -> List[Candidate]
+ r"""Find all possible candidates satisfying given requirements.
+
+ This tries to get candidates based on the requirements' types.
+
+ For concrete requirements (SCM, dir, namespace dir, local or
+ remote archives), the one-and-only match is returned.
+
+ For a "named" requirement, Galaxy-compatible APIs are consulted
+ to find concrete candidates for this requirement. If there's a
+ pre-installed candidate, it's prepended in front of the others.
+
+ :param requirements: A collection of requirements which all of \
+ the returned candidates must match. \
+ All requirements are guaranteed to have \
+ the same identifier. \
+ The collection is never empty.
+
+ :returns: An iterable that orders candidates by preference, \
+ e.g. the most preferred candidate comes first.
+ """
+ # FIXME: The first requirement may be a Git repo followed by
+ # FIXME: its cloned tmp dir. Using only the first one creates
+ # FIXME: loops that prevent any further dependency exploration.
+ # FIXME: We need to figure out how to prevent this.
+ first_req = requirements[0]
+ fqcn = first_req.fqcn
+ # The fqcn is guaranteed to be the same
+ coll_versions = self._api_proxy.get_collection_versions(first_req)
+ if first_req.is_concrete_artifact:
+ # FIXME: do we assume that all the following artifacts are also concrete?
+ # FIXME: does using fqcn==None cause us problems here?
+
+ return [
+ Candidate(fqcn, version, _none_src_server, first_req.type)
+ for version, _none_src_server in coll_versions
+ ]
+
+ latest_matches = sorted(
+ {
+ candidate for candidate in (
+ Candidate(fqcn, version, src_server, 'galaxy')
+ for version, src_server in coll_versions
+ )
+ if all(self.is_satisfied_by(requirement, candidate) for requirement in requirements)
+ # FIXME
+ # if all(self.is_satisfied_by(requirement, candidate) and (
+ # requirement.src is None or # if this is true for some candidates but not all it will break key param - Nonetype can't be compared to str
+ # requirement.src == candidate.src
+ # ))
+ },
+ key=lambda candidate: (
+ SemanticVersion(candidate.ver), candidate.src,
+ ),
+ reverse=True, # prefer newer versions over older ones
+ )
+
+ preinstalled_candidates = {
+ candidate for candidate in self._preferred_candidates
+ if candidate.fqcn == fqcn and
+ (
+ # check if an upgrade is necessary
+ all(self.is_satisfied_by(requirement, candidate) for requirement in requirements) and
+ (
+ not self._upgrade or
+ # check if an upgrade is preferred
+ all(SemanticVersion(latest.ver) <= SemanticVersion(candidate.ver) for latest in latest_matches)
+ )
+ )
+ }
+
+ return list(preinstalled_candidates) + latest_matches
+
+ def is_satisfied_by(self, requirement, candidate):
+ # type: (Requirement, Candidate) -> bool
+ r"""Whether the given requirement is satisfiable by a candidate.
+
+ :param requirement: A requirement that produced the `candidate`.
+
+ :param candidate: A pinned candidate supposedly matching the \
+ `requirement` specifier. It is guaranteed to \
+ have been generated from the `requirement`.
+
+ :returns: Indication whether the `candidate` is a viable \
+ solution to the `requirement`.
+ """
+ # NOTE: Only allow pre-release candidates if we want pre-releases
+ # NOTE: or the req ver was an exact match with the pre-release
+ # NOTE: version. Another case where we'd want to allow
+ # NOTE: pre-releases is when there are several user requirements
+ # NOTE: and one of them is a pre-release that also matches a
+ # NOTE: transitive dependency of another requirement.
+ allow_pre_release = self._with_pre_releases or not (
+ requirement.ver == '*' or
+ requirement.ver.startswith('<') or
+ requirement.ver.startswith('>') or
+ requirement.ver.startswith('!=')
+ ) or self._is_user_requested(candidate)
+ if is_pre_release(candidate.ver) and not allow_pre_release:
+ return False
+
+ # NOTE: This is a set of Pipenv-inspired optimizations. Ref:
+ # https://github.com/sarugaku/passa/blob/2ac00f1/src/passa/models/providers.py#L58-L74
+ if (
+ requirement.is_virtual or
+ candidate.is_virtual or
+ requirement.ver == '*'
+ ):
+ return True
+
+ return meets_requirements(
+ version=candidate.ver,
+ requirements=requirement.ver,
+ )
+
+ def get_dependencies(self, candidate):
+ # type: (Candidate) -> List[Candidate]
+ r"""Get direct dependencies of a candidate.
+
+ :returns: A collection of requirements that `candidate` \
+ specifies as its dependencies.
+ """
+ # FIXME: If there's several galaxy servers set, there may be a
+ # FIXME: situation when the metadata of the same collection
+ # FIXME: differs. So how do we resolve this case? Priority?
+ # FIXME: Taking into account a pinned hash? Exploding on
+ # FIXME: any differences?
+ # NOTE: The underlying implementation currently uses the first found
+ req_map = self._api_proxy.get_collection_dependencies(candidate)
+
+ # NOTE: This guard expression MUST perform an early exit only
+ # NOTE: after the `get_collection_dependencies()` call because
+ # NOTE: internally it populates the artifact URL of the candidate,
+ # NOTE: its SHA hash and the Galaxy API token. These are still
+ # NOTE: necessary with `--no-deps` because even with the disabled
+ # NOTE: dependency resolution the outer layer will still need to
+ # NOTE: know how to download and validate the artifact.
+ #
+ # NOTE: Virtual candidates should always return dependencies
+ # NOTE: because they are ephemeral and non-installable.
+ if not self._with_deps and not candidate.is_virtual:
+ return []
+
+ return [
+ self._make_req_from_dict({'name': dep_name, 'version': dep_req})
+ for dep_name, dep_req in req_map.items()
+ ]
diff --git a/lib/ansible/galaxy/dependency_resolution/reporters.py b/lib/ansible/galaxy/dependency_resolution/reporters.py
new file mode 100644
index 00000000..d8eacb70
--- /dev/null
+++ b/lib/ansible/galaxy/dependency_resolution/reporters.py
@@ -0,0 +1,17 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020-2021, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""Requiement reporter implementations."""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from resolvelib import BaseReporter
+
+
+class CollectionDependencyReporter(BaseReporter):
+ """A dependency reporter for Ansible Collections.
+
+ This is a proxy class allowing us to abstract away importing resolvelib
+ outside of the `ansible.galaxy.dependency_resolution` Python package.
+ """
diff --git a/lib/ansible/galaxy/dependency_resolution/resolvers.py b/lib/ansible/galaxy/dependency_resolution/resolvers.py
new file mode 100644
index 00000000..1b3e30ff
--- /dev/null
+++ b/lib/ansible/galaxy/dependency_resolution/resolvers.py
@@ -0,0 +1,17 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020-2021, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""Requirement resolver implementations."""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from resolvelib import Resolver
+
+
+class CollectionDependencyResolver(Resolver):
+ """A dependency resolver for Ansible Collections.
+
+ This is a proxy class allowing us to abstract away importing resolvelib
+ outside of the `ansible.galaxy.dependency_resolution` Python package.
+ """
diff --git a/lib/ansible/galaxy/dependency_resolution/versioning.py b/lib/ansible/galaxy/dependency_resolution/versioning.py
new file mode 100644
index 00000000..c57f0d21
--- /dev/null
+++ b/lib/ansible/galaxy/dependency_resolution/versioning.py
@@ -0,0 +1,70 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2019-2020, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""Version comparison helpers."""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import operator
+from distutils.version import LooseVersion
+
+from ansible.utils.version import SemanticVersion
+
+
+def is_pre_release(version):
+ # type: (str) -> bool
+ """Figure out if a given version is a pre-release."""
+ try:
+ return SemanticVersion(version).is_prerelease
+ except ValueError:
+ return False
+
+
+def meets_requirements(version, requirements):
+ # type: (str, str) -> bool
+ """Verify if a given version satisfies all the requirements.
+
+ Supported version identifiers are:
+ * '=='
+ * '!='
+ * '>'
+ * '>='
+ * '<'
+ * '<='
+ * '*'
+
+ Each requirement is delimited by ','.
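+
+ For example (illustrative), '1.2.3' meets '>=1.0.0,!=1.5.0' while
+ '1.5.0' does not; a '*' on either side always matches.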
+ """
+ op_map = {
+ '!=': operator.ne,
+ '==': operator.eq,
+ '=': operator.eq,
+ '>=': operator.ge,
+ '>': operator.gt,
+ '<=': operator.le,
+ '<': operator.lt,
+ }
+
+ for req in requirements.split(','):
+ op_pos = 2 if len(req) > 1 and req[1] == '=' else 1
+ op = op_map.get(req[:op_pos])
+
+ requirement = req[op_pos:]
+ if not op:
+ requirement = req
+ op = operator.eq
+
+ if requirement == '*' or version == '*':
+ continue
+
+ if not op(
+ SemanticVersion(version),
+ SemanticVersion.from_loose_version(LooseVersion(requirement)),
+ ):
+ break
+ else:
+ return True
+
+ # The loop was broken early, it does not meet all the requirements
+ return False
diff --git a/lib/ansible/galaxy/role.py b/lib/ansible/galaxy/role.py
index 7de44ded..391df725 100644
--- a/lib/ansible/galaxy/role.py
+++ b/lib/ansible/galaxy/role.py
@@ -334,7 +334,11 @@ class GalaxyRole(object):
n_parts = n_member_name.replace(n_archive_parent_dir, "", 1).split(os.sep)
n_final_parts = []
for n_part in n_parts:
- if n_part != '..' and '~' not in n_part and '$' not in n_part:
+ # TODO if the condition triggers it produces a broken installation.
+ # It will create the parent directory as an empty file and will
+ # explode if the directory contains valid files.
+ # Leaving this as is since the whole module needs a rewrite.
+ if n_part != '..' and not n_part.startswith('~') and '$' not in n_part:
n_final_parts.append(n_part)
member.name = os.path.join(*n_final_parts)
role_tar_file.extract(member, to_native(self.path))
diff --git a/lib/ansible/galaxy/token.py b/lib/ansible/galaxy/token.py
index 7231c8f9..52bff211 100644
--- a/lib/ansible/galaxy/token.py
+++ b/lib/ansible/galaxy/token.py
@@ -120,7 +120,7 @@ class GalaxyToken(object):
def _read(self):
action = 'Opened'
if not os.path.isfile(self.b_file):
- # token file not found, create and chomd u+rw
+ # token file not found, create and chmod u+rw
open(self.b_file, 'w').close()
os.chmod(self.b_file, S_IRUSR | S_IWUSR) # owner has +rw
action = 'Created'
@@ -130,6 +130,10 @@ class GalaxyToken(object):
display.vvv('%s %s' % (action, to_text(self.b_file)))
+ if config and not isinstance(config, dict):
+ display.vvv('Galaxy token file %s malformed, unable to read it' % to_text(self.b_file))
+ return {}
+
return config or {}
def set(self, token):
diff --git a/lib/ansible/inventory/data.py b/lib/ansible/inventory/data.py
index df4af766..cf6fbb66 100644
--- a/lib/ansible/inventory/data.py
+++ b/lib/ansible/inventory/data.py
@@ -51,6 +51,7 @@ class InventoryData(object):
self.localhost = None
self.current_source = None
+ self.processed_sources = []
# Always create the 'all' and 'ungrouped' groups,
for group in ('all', 'ungrouped'):
@@ -64,6 +65,7 @@ class InventoryData(object):
'hosts': self.hosts,
'local': self.localhost,
'source': self.current_source,
+ 'processed_sources': self.processed_sources
}
return data
@@ -73,6 +75,7 @@ class InventoryData(object):
self.groups = data.get('groups')
self.localhost = data.get('local')
self.current_source = data.get('source')
+ self.processed_sources = data.get('processed_sources')
def _create_implicit_localhost(self, pattern):
diff --git a/lib/ansible/inventory/group.py b/lib/ansible/inventory/group.py
index e7878d35..ed02fb4c 100644
--- a/lib/ansible/inventory/group.py
+++ b/lib/ansible/inventory/group.py
@@ -45,7 +45,6 @@ def to_safe_group_name(name, replacer="_", force=False, silent=False):
else:
if C.TRANSFORM_INVALID_GROUP_CHARS == 'never':
display.vvvv('Not replacing %s' % msg)
- warn = True
warn = 'Invalid characters were found in group names but not replaced, use -vvvv to see details'
if warn:
diff --git a/lib/ansible/inventory/manager.py b/lib/ansible/inventory/manager.py
index 27b30da4..aabc75cc 100644
--- a/lib/ansible/inventory/manager.py
+++ b/lib/ansible/inventory/manager.py
@@ -140,7 +140,7 @@ def split_host_pattern(pattern):
class InventoryManager(object):
''' Creates and manages inventory '''
- def __init__(self, loader, sources=None):
+ def __init__(self, loader, sources=None, parse=True):
# base objects
self._loader = loader
@@ -163,7 +163,8 @@ class InventoryManager(object):
self._sources = sources
# get to work!
- self.parse_sources(cache=True)
+ if parse:
+ self.parse_sources(cache=True)
@property
def localhost(self):
@@ -306,9 +307,11 @@ class InventoryManager(object):
else:
display.vvv("%s declined parsing %s as it did not pass its verify_file() method" % (plugin_name, source))
- if not parsed:
+ if parsed:
+ self._inventory.processed_sources.append(self._inventory.current_source)
+ else:
# only warn/error if NOT using the default or using it and the file is present
- # TODO: handle 'non file' inventorya and detect vs hardcode default
+ # TODO: handle 'non file' inventory and detect vs hardcode default
if source != '/etc/ansible/hosts' or os.path.exists(source):
if failures:
@@ -318,7 +321,7 @@ class InventoryManager(object):
if 'tb' in fail:
display.vvv(to_text(fail['tb']))
- # final erorr/warning on inventory source failure
+ # final error/warning on inventory source failure
if C.INVENTORY_ANY_UNPARSED_IS_FAILED:
raise AnsibleError(u'Completely failed to parse inventory source %s' % (source))
else:
diff --git a/lib/ansible/keyword_desc.yml b/lib/ansible/keyword_desc.yml
new file mode 100644
index 00000000..3139b5a6
--- /dev/null
+++ b/lib/ansible/keyword_desc.yml
@@ -0,0 +1,79 @@
+accelerate: "*DEPRECATED*, set to True to use accelerate connection plugin."
+accelerate_ipv6: "*DEPRECATED*, set to True to force accelerate plugin to use ipv6 for its connection."
+accelerate_port: "*DEPRECATED*, set to override default port use for accelerate connection."
+action: "The 'action' to execute for a task, it normally translates into a C(module) or action plugin."
+args: "A secondary way to add arguments into a task. Takes a dictionary in which keys map to options and values."
+always: List of tasks, in a block, that execute no matter if there is an error in the block or not.
+any_errors_fatal: Force any un-handled task errors on any host to propagate to all hosts and end the play.
+async: Run a task asynchronously if the C(action) supports this; value is maximum runtime in seconds.
+become: Boolean that controls if privilege escalation is used or not on :term:`Task` execution. Implemented by the become plugin. See :ref:`become_plugins`.
+become_exe: Path to the executable used to elevate privileges. Implemented by the become plugin. See :ref:`become_plugins`.
+become_flags: A string of flag(s) to pass to the privilege escalation program when :term:`become` is True.
+become_method: Which method of privilege escalation to use (such as sudo or su).
+become_user: "User that you 'become' after using privilege escalation. The remote/login user must have permissions to become this user."
+block: List of tasks in a block.
+changed_when: "Conditional expression that overrides the task's normal 'changed' status."
+check_mode: A boolean that controls if a task is executed in 'check' mode. See :ref:`check_mode_dry`.
+collections: |
+
+ List of collection namespaces to search for modules, plugins, and roles. See :ref:`collections_using_playbook`
+
+ .. note::
+
+ Tasks within a role do not inherit the value of ``collections`` from the play. To have a role search a list of collections, use the ``collections`` keyword in ``meta/main.yml`` within a role.
+
+
+connection: Allows you to change the connection plugin used for tasks to execute on the target. See :ref:`using_connection`.
+debugger: Enable debugging tasks based on state of the task result. See :ref:`playbook_debugger`.
+delay: Number of seconds to delay between retries. This setting is only used in combination with :term:`until`.
+delegate_facts: Boolean that allows you to apply facts to a delegated host instead of inventory_hostname.
+delegate_to: Host to execute task instead of the target (inventory_hostname). Connection vars from the delegated host will also be used for the task.
+diff: "Toggle to make tasks return 'diff' information or not."
+environment: A dictionary that gets converted into environment vars to be provided for the task upon execution. This can ONLY be used with modules. This isn't supported for any other type of plugin, nor for Ansible itself or its configuration; it just sets the variables for the code responsible for executing the task. This is not a recommended way to pass in confidential data.
+fact_path: Set the fact path option for the fact gathering plugin controlled by :term:`gather_facts`.
+failed_when: "Conditional expression that overrides the task's normal 'failed' status."
+force_handlers: Will force notified handler execution for hosts even if they failed during the play. Will not trigger if the play itself fails.
+gather_facts: "A boolean that controls if the play will automatically run the 'setup' task to gather facts for the hosts."
+gather_subset: Allows you to pass subset options to the fact gathering plugin controlled by :term:`gather_facts`.
+gather_timeout: Allows you to set the timeout for the fact gathering plugin controlled by :term:`gather_facts`.
+handlers: "A section with tasks that are treated as handlers; these won't get executed normally, only when notified after each section of tasks is complete. A handler's `listen` field is not templatable."
+hosts: "A list of groups, hosts, or host patterns that translates into a list of hosts that are the play's target."
+ignore_errors: Boolean that allows you to ignore task failures and continue with the play. It does not affect connection errors.
+ignore_unreachable: Boolean that allows you to ignore task failures due to an unreachable host and continue with the play. This does not affect other task errors (see :term:`ignore_errors`) but is useful for groups of volatile/ephemeral hosts.
+loop: "Takes a list for the task to iterate over, saving each list element into the ``item`` variable (configurable via loop_control)"
+loop_control: |
+ Several keys here allow you to modify/set loop behaviour in a task.
+
+ .. seealso:: :ref:`loop_control`
+
+max_fail_percentage: Can be used to abort the run after a given percentage of hosts in the current batch has failed. This only works on linear or linear-derived strategies.
+module_defaults: Specifies default parameter values for modules.
+name: "Identifier. Can be used for documentation, or in tasks/handlers."
+no_log: Boolean that controls information disclosure.
+notify: "List of handlers to notify when the task returns a 'changed=True' status."
+order: Controls the sorting of hosts as they are used for executing the play. Possible values are inventory (default), sorted, reverse_sorted, reverse_inventory and shuffle.
+poll: Sets the polling interval in seconds for async tasks (default 10s).
+port: Used to override the default port used in a connection.
+post_tasks: A list of tasks to execute after the :term:`tasks` section.
+pre_tasks: A list of tasks to execute before :term:`roles`.
+remote_user: User used to log into the target via the connection plugin.
+register: Name of variable that will contain task status and module return data.
+rescue: List of tasks in a :term:`block` that run if there is a task error in the main :term:`block` list.
+retries: "Number of retries before giving up in a :term:`until` loop. This setting is only used in combination with :term:`until`."
+roles: List of roles to be imported into the play.
+run_once: Boolean that will bypass the host loop, forcing the task to attempt to execute on the first host available and afterwards apply any results and facts to all active hosts in the same batch.
+serial: |
+ Explicitly define how Ansible batches the execution of the current play on the play's target
+
+ .. seealso:: :ref:`rolling_update_batch_size`
+
+strategy: Allows you to choose the strategy plugin to use for the play.
+tags: Tags applied to the task or included tasks; this allows selecting subsets of tasks from the command line.
+tasks: Main list of tasks to execute in the play; they run after :term:`roles` and before :term:`post_tasks`.
+timeout: Time limit for the task to execute in; if exceeded, Ansible will interrupt and fail the task.
+throttle: Limit number of concurrent task runs on task, block and playbook level. This is independent of the forks and serial settings, but cannot be set higher than those limits. For example, if forks is set to 10 and the throttle is set to 15, at most 10 hosts will be operated on in parallel.
+until: "This keyword implies a ':term:`retries` loop' that will go on until the condition supplied here is met or we hit the :term:`retries` limit."
+vars: Dictionary/map of variables.
+vars_files: List of files that contain vars to include in the play.
+vars_prompt: List of variables to prompt for.
+when: Conditional expression that determines if an iteration of a task is run or not.
diff --git a/lib/ansible/module_utils/api.py b/lib/ansible/module_utils/api.py
index 46a036d3..e780ec6b 100644
--- a/lib/ansible/module_utils/api.py
+++ b/lib/ansible/module_utils/api.py
@@ -26,6 +26,8 @@ The 'api' module provides the following common argument specs:
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
+import functools
+import random
import sys
import time
@@ -114,3 +116,51 @@ def retry(retries=None, retry_pause=1):
return retried
return wrapper
+
+
+def generate_jittered_backoff(retries=10, delay_base=3, delay_threshold=60):
+ """The "Full Jitter" backoff strategy.
+
+ Ref: https://www.awsarchitectureblog.com/2015/03/backoff.html
+
+ :param retries: The number of delays to generate.
+ :param delay_base: The base time in seconds used to calculate the exponential backoff.
+ :param delay_threshold: The maximum time in seconds for any delay.
+ """
+ for retry in range(0, retries):
+ yield random.randint(0, min(delay_threshold, delay_base * 2 ** retry))
+
+
+def retry_never(exception_or_result):
+ return False
+
+
+def retry_with_delays_and_condition(backoff_iterator, should_retry_error=None):
+ """Generic retry decorator.
+
+ :param backoff_iterator: An iterable of delays in seconds.
+    :param should_retry_error: A callable that takes an exception raised by the decorated function and decides whether to retry or not (returns a bool).
+ """
+ if should_retry_error is None:
+ should_retry_error = retry_never
+
+ def function_wrapper(function):
+ @functools.wraps(function)
+ def run_function(*args, **kwargs):
+ """This assumes the function has not already been called.
+ If backoff_iterator is empty, we should still run the function a single time with no delay.
+ """
+ call_retryable_function = functools.partial(function, *args, **kwargs)
+
+ for delay in backoff_iterator:
+ try:
+ return call_retryable_function()
+ except Exception as e:
+ if not should_retry_error(e):
+ raise
+ time.sleep(delay)
+
+ # Only or final attempt
+ return call_retryable_function()
+ return run_function
+ return function_wrapper
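A minimal sketch of how the two new api.py helpers compose (the fetch_status function and the IOError predicate below are illustrative, not part of this change): the decorator calls the wrapped function once per delay produced by the backoff iterator, sleeps after a retryable failure, re-raises immediately when the predicate rejects the exception, and makes one final attempt once the iterator is exhausted.

    from ansible.module_utils.api import (
        generate_jittered_backoff,
        retry_with_delays_and_condition,
    )

    @retry_with_delays_and_condition(
        backoff_iterator=generate_jittered_backoff(retries=5, delay_base=2, delay_threshold=30),
        should_retry_error=lambda error: isinstance(error, IOError),  # illustrative predicate
    )
    def fetch_status(url):
        # hypothetical network call; exceptions the predicate rejects propagate at once
        ...

Note that the backoff iterator is built once when the decorator is applied, so its delays are shared across calls to the decorated function rather than reset per call.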
diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py
index 333c5878..3810dc22 100644
--- a/lib/ansible/module_utils/basic.py
+++ b/lib/ansible/module_utils/basic.py
@@ -55,7 +55,6 @@ import time
import traceback
import types
-from collections import deque
from itertools import chain, repeat
try:
@@ -74,7 +73,7 @@ except ImportError:
HAVE_SELINUX = False
try:
- import selinux
+ import ansible.module_utils.compat.selinux as selinux
HAVE_SELINUX = True
except ImportError:
pass
@@ -91,6 +90,8 @@ from ansible.module_utils.common.text.converters import (
container_to_text as json_dict_bytes_to_unicode,
)
+from ansible.module_utils.common.arg_spec import ModuleArgumentSpecValidator
+
from ansible.module_utils.common.text.formatters import (
lenient_lowercase,
bytes_to_human,
@@ -156,13 +157,15 @@ from ansible.module_utils.common.sys_info import (
)
from ansible.module_utils.pycompat24 import get_exception, literal_eval
from ansible.module_utils.common.parameters import (
- handle_aliases,
- list_deprecations,
- list_no_log_values,
+ env_fallback,
+ remove_values,
+ sanitize_keys,
+ DEFAULT_TYPE_VALIDATORS,
PASS_VARS,
PASS_BOOLS,
)
+from ansible.module_utils.errors import AnsibleFallbackNotFound, AnsibleValidationErrorMultiple, UnsupportedError
from ansible.module_utils.six import (
PY2,
PY3,
@@ -176,24 +179,6 @@ from ansible.module_utils.six import (
from ansible.module_utils.six.moves import map, reduce, shlex_quote
from ansible.module_utils.common.validation import (
check_missing_parameters,
- check_mutually_exclusive,
- check_required_arguments,
- check_required_by,
- check_required_if,
- check_required_one_of,
- check_required_together,
- count_terms,
- check_type_bool,
- check_type_bits,
- check_type_bytes,
- check_type_float,
- check_type_int,
- check_type_jsonarg,
- check_type_list,
- check_type_dict,
- check_type_path,
- check_type_raw,
- check_type_str,
safe_eval,
)
from ansible.module_utils.common._utils import get_all_subclasses as _get_all_subclasses
@@ -237,6 +222,7 @@ _literal_eval = literal_eval
# is an internal implementation detail
_ANSIBLE_ARGS = None
+
FILE_COMMON_ARGUMENTS = dict(
# These are things we want. About setting metadata (mode, ownership, permissions in general) on
# created files (these are used by set_fs_attributes_if_different and included in
@@ -249,7 +235,7 @@ FILE_COMMON_ARGUMENTS = dict(
selevel=dict(type='str'),
setype=dict(type='str'),
attributes=dict(type='str', aliases=['attr']),
- unsafe_writes=dict(type='bool', default=False), # should be available to any module using atomic_move
+ unsafe_writes=dict(type='bool', default=False, fallback=(env_fallback, ['ANSIBLE_UNSAFE_WRITES'])), # should be available to any module using atomic_move
)
PASSWD_ARG_RE = re.compile(r'^[-]{0,2}pass[-]?(word|wd)?')
@@ -308,212 +294,6 @@ def get_all_subclasses(cls):
# End compat shims
-def _remove_values_conditions(value, no_log_strings, deferred_removals):
- """
- Helper function for :meth:`remove_values`.
-
- :arg value: The value to check for strings that need to be stripped
- :arg no_log_strings: set of strings which must be stripped out of any values
- :arg deferred_removals: List which holds information about nested
- containers that have to be iterated for removals. It is passed into
- this function so that more entries can be added to it if value is
- a container type. The format of each entry is a 2-tuple where the first
- element is the ``value`` parameter and the second value is a new
- container to copy the elements of ``value`` into once iterated.
- :returns: if ``value`` is a scalar, returns ``value`` with two exceptions:
- 1. :class:`~datetime.datetime` objects which are changed into a string representation.
- 2. objects which are in no_log_strings are replaced with a placeholder
- so that no sensitive data is leaked.
- If ``value`` is a container type, returns a new empty container.
-
- ``deferred_removals`` is added to as a side-effect of this function.
-
- .. warning:: It is up to the caller to make sure the order in which value
- is passed in is correct. For instance, higher level containers need
- to be passed in before lower level containers. For example, given
- ``{'level1': {'level2': 'level3': [True]} }`` first pass in the
- dictionary for ``level1``, then the dict for ``level2``, and finally
- the list for ``level3``.
- """
- if isinstance(value, (text_type, binary_type)):
- # Need native str type
- native_str_value = value
- if isinstance(value, text_type):
- value_is_text = True
- if PY2:
- native_str_value = to_bytes(value, errors='surrogate_or_strict')
- elif isinstance(value, binary_type):
- value_is_text = False
- if PY3:
- native_str_value = to_text(value, errors='surrogate_or_strict')
-
- if native_str_value in no_log_strings:
- return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
- for omit_me in no_log_strings:
- native_str_value = native_str_value.replace(omit_me, '*' * 8)
-
- if value_is_text and isinstance(native_str_value, binary_type):
- value = to_text(native_str_value, encoding='utf-8', errors='surrogate_then_replace')
- elif not value_is_text and isinstance(native_str_value, text_type):
- value = to_bytes(native_str_value, encoding='utf-8', errors='surrogate_then_replace')
- else:
- value = native_str_value
-
- elif isinstance(value, Sequence):
- if isinstance(value, MutableSequence):
- new_value = type(value)()
- else:
- new_value = [] # Need a mutable value
- deferred_removals.append((value, new_value))
- value = new_value
-
- elif isinstance(value, Set):
- if isinstance(value, MutableSet):
- new_value = type(value)()
- else:
- new_value = set() # Need a mutable value
- deferred_removals.append((value, new_value))
- value = new_value
-
- elif isinstance(value, Mapping):
- if isinstance(value, MutableMapping):
- new_value = type(value)()
- else:
- new_value = {} # Need a mutable value
- deferred_removals.append((value, new_value))
- value = new_value
-
- elif isinstance(value, tuple(chain(integer_types, (float, bool, NoneType)))):
- stringy_value = to_native(value, encoding='utf-8', errors='surrogate_or_strict')
- if stringy_value in no_log_strings:
- return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
- for omit_me in no_log_strings:
- if omit_me in stringy_value:
- return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
-
- elif isinstance(value, datetime.datetime):
- value = value.isoformat()
- else:
- raise TypeError('Value of unknown type: %s, %s' % (type(value), value))
-
- return value
-
-
-def remove_values(value, no_log_strings):
- """ Remove strings in no_log_strings from value. If value is a container
- type, then remove a lot more.
-
- Use of deferred_removals exists, rather than a pure recursive solution,
- because of the potential to hit the maximum recursion depth when dealing with
- large amounts of data (see issue #24560).
- """
-
- deferred_removals = deque()
-
- no_log_strings = [to_native(s, errors='surrogate_or_strict') for s in no_log_strings]
- new_value = _remove_values_conditions(value, no_log_strings, deferred_removals)
-
- while deferred_removals:
- old_data, new_data = deferred_removals.popleft()
- if isinstance(new_data, Mapping):
- for old_key, old_elem in old_data.items():
- new_elem = _remove_values_conditions(old_elem, no_log_strings, deferred_removals)
- new_data[old_key] = new_elem
- else:
- for elem in old_data:
- new_elem = _remove_values_conditions(elem, no_log_strings, deferred_removals)
- if isinstance(new_data, MutableSequence):
- new_data.append(new_elem)
- elif isinstance(new_data, MutableSet):
- new_data.add(new_elem)
- else:
- raise TypeError('Unknown container type encountered when removing private values from output')
-
- return new_value
-
-
-def _sanitize_keys_conditions(value, no_log_strings, ignore_keys, deferred_removals):
- """ Helper method to sanitize_keys() to build deferred_removals and avoid deep recursion. """
- if isinstance(value, (text_type, binary_type)):
- return value
-
- if isinstance(value, Sequence):
- if isinstance(value, MutableSequence):
- new_value = type(value)()
- else:
- new_value = [] # Need a mutable value
- deferred_removals.append((value, new_value))
- return new_value
-
- if isinstance(value, Set):
- if isinstance(value, MutableSet):
- new_value = type(value)()
- else:
- new_value = set() # Need a mutable value
- deferred_removals.append((value, new_value))
- return new_value
-
- if isinstance(value, Mapping):
- if isinstance(value, MutableMapping):
- new_value = type(value)()
- else:
- new_value = {} # Need a mutable value
- deferred_removals.append((value, new_value))
- return new_value
-
- if isinstance(value, tuple(chain(integer_types, (float, bool, NoneType)))):
- return value
-
- if isinstance(value, (datetime.datetime, datetime.date)):
- return value
-
- raise TypeError('Value of unknown type: %s, %s' % (type(value), value))
-
-
-def sanitize_keys(obj, no_log_strings, ignore_keys=frozenset()):
- """ Sanitize the keys in a container object by removing no_log values from key names.
-
- This is a companion function to the `remove_values()` function. Similar to that function,
- we make use of deferred_removals to avoid hitting maximum recursion depth in cases of
- large data structures.
-
- :param obj: The container object to sanitize. Non-container objects are returned unmodified.
- :param no_log_strings: A set of string values we do not want logged.
- :param ignore_keys: A set of string values of keys to not sanitize.
-
- :returns: An object with sanitized keys.
- """
-
- deferred_removals = deque()
-
- no_log_strings = [to_native(s, errors='surrogate_or_strict') for s in no_log_strings]
- new_value = _sanitize_keys_conditions(obj, no_log_strings, ignore_keys, deferred_removals)
-
- while deferred_removals:
- old_data, new_data = deferred_removals.popleft()
-
- if isinstance(new_data, Mapping):
- for old_key, old_elem in old_data.items():
- if old_key in ignore_keys or old_key.startswith('_ansible'):
- new_data[old_key] = _sanitize_keys_conditions(old_elem, no_log_strings, ignore_keys, deferred_removals)
- else:
- # Sanitize the old key. We take advantage of the sanitizing code in
- # _remove_values_conditions() rather than recreating it here.
- new_key = _remove_values_conditions(old_key, no_log_strings, None)
- new_data[new_key] = _sanitize_keys_conditions(old_elem, no_log_strings, ignore_keys, deferred_removals)
- else:
- for elem in old_data:
- new_elem = _sanitize_keys_conditions(elem, no_log_strings, ignore_keys, deferred_removals)
- if isinstance(new_data, MutableSequence):
- new_data.append(new_elem)
- elif isinstance(new_data, MutableSet):
- new_data.add(new_elem)
- else:
- raise TypeError('Unknown container type encountered when removing private values from keys')
-
- return new_value
-
-
def heuristic_log_sanitize(data, no_log_values=None):
''' Remove strings that look like passwords from log messages '''
# Currently filters:
@@ -635,14 +415,6 @@ def _load_params():
sys.exit(1)
-def env_fallback(*args, **kwargs):
- ''' Load value from environment '''
- for arg in args:
- if arg in os.environ:
- return os.environ[arg]
- raise AnsibleFallbackNotFound
-
-
def missing_required_lib(library, reason=None, url=None):
hostname = platform.node()
msg = "Failed to import the required Python library (%s) on %s's Python %s." % (library, hostname, sys.executable)
@@ -657,10 +429,6 @@ def missing_required_lib(library, reason=None, url=None):
return msg
-class AnsibleFallbackNotFound(Exception):
- pass
-
-
class AnsibleModule(object):
def __init__(self, argument_spec, bypass_checks=False, no_log=False,
mutually_exclusive=None, required_together=None,
@@ -692,6 +460,7 @@ class AnsibleModule(object):
self._diff = False
self._socket_path = None
self._shell = None
+ self._syslog_facility = 'LOG_USER'
self._verbosity = 0
# May be used to set modifications to the environment for any
# run_command invocation
@@ -704,10 +473,7 @@ class AnsibleModule(object):
self._options_context = list()
self._tmpdir = None
- self._created_files = set()
-
if add_file_common_args:
- self._uses_common_file_args = True
for k, v in FILE_COMMON_ARGUMENTS.items():
if k not in self.argument_spec:
self.argument_spec[k] = v
@@ -715,62 +481,52 @@ class AnsibleModule(object):
# Save parameter values that should never be logged
self.no_log_values = set()
+ # check the locale as set by the current environment, and reset to
+ # a known valid (LANG=C) if it's an invalid/unavailable locale
+ self._check_locale()
+
self._load_params()
- self._set_fallbacks()
+ self._set_internal_properties()
- # append to legal_inputs and then possibly check against them
- try:
- self.aliases = self._handle_aliases()
- except (ValueError, TypeError) as e:
- # Use exceptions here because it isn't safe to call fail_json until no_log is processed
- print('\n{"failed": true, "msg": "Module alias error: %s"}' % to_native(e))
- sys.exit(1)
+ self.validator = ModuleArgumentSpecValidator(self.argument_spec,
+ self.mutually_exclusive,
+ self.required_together,
+ self.required_one_of,
+ self.required_if,
+ self.required_by,
+ )
- self._handle_no_log_values()
+ self.validation_result = self.validator.validate(self.params)
+ self.params.update(self.validation_result.validated_parameters)
+ self.no_log_values.update(self.validation_result._no_log_values)
- # check the locale as set by the current environment, and reset to
- # a known valid (LANG=C) if it's an invalid/unavailable locale
- self._check_locale()
+ try:
+ error = self.validation_result.errors[0]
+ except IndexError:
+ error = None
- self._check_arguments()
-
- # check exclusive early
- if not bypass_checks:
- self._check_mutually_exclusive(mutually_exclusive)
-
- self._set_defaults(pre=True)
-
- self._CHECK_ARGUMENT_TYPES_DISPATCHER = {
- 'str': self._check_type_str,
- 'list': self._check_type_list,
- 'dict': self._check_type_dict,
- 'bool': self._check_type_bool,
- 'int': self._check_type_int,
- 'float': self._check_type_float,
- 'path': self._check_type_path,
- 'raw': self._check_type_raw,
- 'jsonarg': self._check_type_jsonarg,
- 'json': self._check_type_jsonarg,
- 'bytes': self._check_type_bytes,
- 'bits': self._check_type_bits,
- }
- if not bypass_checks:
- self._check_required_arguments()
- self._check_argument_types()
- self._check_argument_values()
- self._check_required_together(required_together)
- self._check_required_one_of(required_one_of)
- self._check_required_if(required_if)
- self._check_required_by(required_by)
+ # Fail for validation errors, even in check mode
+ if error:
+ msg = self.validation_result.errors.msg
+ if isinstance(error, UnsupportedError):
+ msg = "Unsupported parameters for ({name}) {kind}: {msg}".format(name=self._name, kind='module', msg=msg)
- self._set_defaults(pre=False)
+ self.fail_json(msg=msg)
+
+ if self.check_mode and not self.supports_check_mode:
+ self.exit_json(skipped=True, msg="remote module (%s) does not support check mode" % self._name)
- # deal with options sub-spec
- self._handle_options()
+ # This is for backwards compatibility only.
+ self._CHECK_ARGUMENT_TYPES_DISPATCHER = DEFAULT_TYPE_VALIDATORS
if not self.no_log:
self._log_invocation()
+ # selinux state caching
+ self._selinux_enabled = None
+ self._selinux_mls_enabled = None
+ self._selinux_initial_context = None
+
# finally, make sure we're in a sane working dir
self._set_cwd()
@@ -884,37 +640,30 @@ class AnsibleModule(object):
# by selinux.lgetfilecon().
def selinux_mls_enabled(self):
- if not HAVE_SELINUX:
- return False
- if selinux.is_selinux_mls_enabled() == 1:
- return True
- else:
- return False
+ if self._selinux_mls_enabled is None:
+ self._selinux_mls_enabled = HAVE_SELINUX and selinux.is_selinux_mls_enabled() == 1
+
+ return self._selinux_mls_enabled
def selinux_enabled(self):
- if not HAVE_SELINUX:
- seenabled = self.get_bin_path('selinuxenabled')
- if seenabled is not None:
- (rc, out, err) = self.run_command(seenabled)
- if rc == 0:
- self.fail_json(msg="Aborting, target uses selinux but python bindings (libselinux-python) aren't installed!")
- return False
- if selinux.is_selinux_enabled() == 1:
- return True
- else:
- return False
+ if self._selinux_enabled is None:
+ self._selinux_enabled = HAVE_SELINUX and selinux.is_selinux_enabled() == 1
+
+ return self._selinux_enabled
# Determine whether we need a placeholder for selevel/mls
def selinux_initial_context(self):
- context = [None, None, None]
- if self.selinux_mls_enabled():
- context.append(None)
- return context
+ if self._selinux_initial_context is None:
+ self._selinux_initial_context = [None, None, None]
+ if self.selinux_mls_enabled():
+ self._selinux_initial_context.append(None)
+
+ return self._selinux_initial_context
# If selinux fails to find a default, return an array of None
def selinux_default_context(self, path, mode=0):
context = self.selinux_initial_context()
- if not HAVE_SELINUX or not self.selinux_enabled():
+ if not self.selinux_enabled():
return context
try:
ret = selinux.matchpathcon(to_native(path, errors='surrogate_or_strict'), mode)
@@ -929,7 +678,7 @@ class AnsibleModule(object):
def selinux_context(self, path):
context = self.selinux_initial_context()
- if not HAVE_SELINUX or not self.selinux_enabled():
+ if not self.selinux_enabled():
return context
try:
ret = selinux.lgetfilecon_raw(to_native(path, errors='surrogate_or_strict'))
@@ -993,14 +742,14 @@ class AnsibleModule(object):
return (False, None)
def set_default_selinux_context(self, path, changed):
- if not HAVE_SELINUX or not self.selinux_enabled():
+ if not self.selinux_enabled():
return changed
context = self.selinux_default_context(path)
return self.set_context_if_different(path, context, False)
def set_context_if_different(self, path, context, changed, diff=None):
- if not HAVE_SELINUX or not self.selinux_enabled():
+ if not self.selinux_enabled():
return changed
if self.check_file_absent_if_check_mode(path):
@@ -1127,24 +876,18 @@ class AnsibleModule(object):
def set_mode_if_different(self, path, mode, changed, diff=None, expand=True):
- # Remove paths so we do not warn about creating with default permissions
- # since we are calling this method on the path and setting the specified mode.
- try:
- self._created_files.remove(path)
- except KeyError:
- pass
-
if mode is None:
return changed
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
- path_stat = os.lstat(b_path)
if self.check_file_absent_if_check_mode(b_path):
return True
+ path_stat = os.lstat(b_path)
+
if not isinstance(mode, int):
try:
mode = int(mode, 8)
@@ -1228,7 +971,7 @@ class AnsibleModule(object):
if self.check_file_absent_if_check_mode(b_path):
return True
- existing = self.get_file_attributes(b_path)
+ existing = self.get_file_attributes(b_path, include_version=False)
attr_mod = '='
if attributes.startswith(('-', '+')):
@@ -1259,17 +1002,21 @@ class AnsibleModule(object):
details=to_native(e), exception=traceback.format_exc())
return changed
- def get_file_attributes(self, path):
+ def get_file_attributes(self, path, include_version=True):
output = {}
attrcmd = self.get_bin_path('lsattr', False)
if attrcmd:
- attrcmd = [attrcmd, '-vd', path]
+ flags = '-vd' if include_version else '-d'
+ attrcmd = [attrcmd, flags, path]
try:
rc, out, err = self.run_command(attrcmd)
if rc == 0:
res = out.split()
- output['attr_flags'] = res[1].replace('-', '').strip()
- output['version'] = res[0].strip()
+ attr_flags_idx = 0
+ if include_version:
+ attr_flags_idx = 1
+ output['version'] = res[0].strip()
+ output['attr_flags'] = res[attr_flags_idx].replace('-', '').strip()
output['attributes'] = format_attributes(output['attr_flags'])
except Exception:
pass
@@ -1436,11 +1183,6 @@ class AnsibleModule(object):
def set_file_attributes_if_different(self, file_args, changed, diff=None, expand=True):
return self.set_fs_attributes_if_different(file_args, changed, diff, expand)
- def add_atomic_move_warnings(self):
- for path in sorted(self._created_files):
- self.warn("File '{0}' created with default permissions '{1:o}'. The previous default was '666'. "
- "Specify 'mode' to avoid this warning.".format(to_native(path), DEFAULT_PERM))
-
def add_path_info(self, kwargs):
'''
for results that are files, supplement the info about the file
@@ -1476,7 +1218,7 @@ class AnsibleModule(object):
kwargs['state'] = 'hard'
else:
kwargs['state'] = 'file'
- if HAVE_SELINUX and self.selinux_enabled():
+ if self.selinux_enabled():
kwargs['secontext'] = ':'.join(self.selinux_context(path))
kwargs['size'] = st[stat.ST_SIZE]
return kwargs
@@ -1502,70 +1244,20 @@ class AnsibleModule(object):
self.fail_json(msg="An unknown error was encountered while attempting to validate the locale: %s" %
to_native(e), exception=traceback.format_exc())
- def _handle_aliases(self, spec=None, param=None, option_prefix=''):
- if spec is None:
- spec = self.argument_spec
- if param is None:
- param = self.params
-
- # this uses exceptions as it happens before we can safely call fail_json
- alias_warnings = []
- alias_results, self._legal_inputs = handle_aliases(spec, param, alias_warnings=alias_warnings)
- for option, alias in alias_warnings:
- warn('Both option %s and its alias %s are set.' % (option_prefix + option, option_prefix + alias))
-
- deprecated_aliases = []
- for i in spec.keys():
- if 'deprecated_aliases' in spec[i].keys():
- for alias in spec[i]['deprecated_aliases']:
- deprecated_aliases.append(alias)
-
- for deprecation in deprecated_aliases:
- if deprecation['name'] in param.keys():
- deprecate("Alias '%s' is deprecated. See the module docs for more information" % deprecation['name'],
- version=deprecation.get('version'), date=deprecation.get('date'),
- collection_name=deprecation.get('collection_name'))
- return alias_results
-
- def _handle_no_log_values(self, spec=None, param=None):
- if spec is None:
- spec = self.argument_spec
- if param is None:
- param = self.params
-
- try:
- self.no_log_values.update(list_no_log_values(spec, param))
- except TypeError as te:
- self.fail_json(msg="Failure when processing no_log parameters. Module invocation will be hidden. "
- "%s" % to_native(te), invocation={'module_args': 'HIDDEN DUE TO FAILURE'})
-
- for message in list_deprecations(spec, param):
- deprecate(message['msg'], version=message.get('version'), date=message.get('date'),
- collection_name=message.get('collection_name'))
-
- def _check_arguments(self, spec=None, param=None, legal_inputs=None):
- self._syslog_facility = 'LOG_USER'
- unsupported_parameters = set()
- if spec is None:
- spec = self.argument_spec
- if param is None:
- param = self.params
- if legal_inputs is None:
- legal_inputs = self._legal_inputs
-
- for k in list(param.keys()):
-
- if k not in legal_inputs:
- unsupported_parameters.add(k)
+ def _set_internal_properties(self, argument_spec=None, module_parameters=None):
+ if argument_spec is None:
+ argument_spec = self.argument_spec
+ if module_parameters is None:
+ module_parameters = self.params
for k in PASS_VARS:
# handle setting internal properties from internal ansible vars
param_key = '_ansible_%s' % k
- if param_key in param:
+ if param_key in module_parameters:
if k in PASS_BOOLS:
- setattr(self, PASS_VARS[k][0], self.boolean(param[param_key]))
+ setattr(self, PASS_VARS[k][0], self.boolean(module_parameters[param_key]))
else:
- setattr(self, PASS_VARS[k][0], param[param_key])
+ setattr(self, PASS_VARS[k][0], module_parameters[param_key])
# clean up internal top level params:
if param_key in self.params:
@@ -1575,412 +1267,9 @@ class AnsibleModule(object):
if not hasattr(self, PASS_VARS[k][0]):
setattr(self, PASS_VARS[k][0], PASS_VARS[k][1])
- if unsupported_parameters:
- msg = "Unsupported parameters for (%s) module: %s" % (self._name, ', '.join(sorted(list(unsupported_parameters))))
- if self._options_context:
- msg += " found in %s." % " -> ".join(self._options_context)
- msg += " Supported parameters include: %s" % (', '.join(sorted(spec.keys())))
- self.fail_json(msg=msg)
- if self.check_mode and not self.supports_check_mode:
- self.exit_json(skipped=True, msg="remote module (%s) does not support check mode" % self._name)
-
- def _count_terms(self, check, param=None):
- if param is None:
- param = self.params
- return count_terms(check, param)
-
- def _check_mutually_exclusive(self, spec, param=None):
- if param is None:
- param = self.params
-
- try:
- check_mutually_exclusive(spec, param)
- except TypeError as e:
- msg = to_native(e)
- if self._options_context:
- msg += " found in %s" % " -> ".join(self._options_context)
- self.fail_json(msg=msg)
-
- def _check_required_one_of(self, spec, param=None):
- if spec is None:
- return
-
- if param is None:
- param = self.params
-
- try:
- check_required_one_of(spec, param)
- except TypeError as e:
- msg = to_native(e)
- if self._options_context:
- msg += " found in %s" % " -> ".join(self._options_context)
- self.fail_json(msg=msg)
-
- def _check_required_together(self, spec, param=None):
- if spec is None:
- return
- if param is None:
- param = self.params
-
- try:
- check_required_together(spec, param)
- except TypeError as e:
- msg = to_native(e)
- if self._options_context:
- msg += " found in %s" % " -> ".join(self._options_context)
- self.fail_json(msg=msg)
-
- def _check_required_by(self, spec, param=None):
- if spec is None:
- return
- if param is None:
- param = self.params
-
- try:
- check_required_by(spec, param)
- except TypeError as e:
- self.fail_json(msg=to_native(e))
-
- def _check_required_arguments(self, spec=None, param=None):
- if spec is None:
- spec = self.argument_spec
- if param is None:
- param = self.params
-
- try:
- check_required_arguments(spec, param)
- except TypeError as e:
- msg = to_native(e)
- if self._options_context:
- msg += " found in %s" % " -> ".join(self._options_context)
- self.fail_json(msg=msg)
-
- def _check_required_if(self, spec, param=None):
- ''' ensure that parameters which conditionally required are present '''
- if spec is None:
- return
- if param is None:
- param = self.params
-
- try:
- check_required_if(spec, param)
- except TypeError as e:
- msg = to_native(e)
- if self._options_context:
- msg += " found in %s" % " -> ".join(self._options_context)
- self.fail_json(msg=msg)
-
- def _check_argument_values(self, spec=None, param=None):
- ''' ensure all arguments have the requested values, and there are no stray arguments '''
- if spec is None:
- spec = self.argument_spec
- if param is None:
- param = self.params
- for (k, v) in spec.items():
- choices = v.get('choices', None)
- if choices is None:
- continue
- if isinstance(choices, SEQUENCETYPE) and not isinstance(choices, (binary_type, text_type)):
- if k in param:
- # Allow one or more when type='list' param with choices
- if isinstance(param[k], list):
- diff_list = ", ".join([item for item in param[k] if item not in choices])
- if diff_list:
- choices_str = ", ".join([to_native(c) for c in choices])
- msg = "value of %s must be one or more of: %s. Got no match for: %s" % (k, choices_str, diff_list)
- if self._options_context:
- msg += " found in %s" % " -> ".join(self._options_context)
- self.fail_json(msg=msg)
- elif param[k] not in choices:
- # PyYaml converts certain strings to bools. If we can unambiguously convert back, do so before checking
- # the value. If we can't figure this out, module author is responsible.
- lowered_choices = None
- if param[k] == 'False':
- lowered_choices = lenient_lowercase(choices)
- overlap = BOOLEANS_FALSE.intersection(choices)
- if len(overlap) == 1:
- # Extract from a set
- (param[k],) = overlap
-
- if param[k] == 'True':
- if lowered_choices is None:
- lowered_choices = lenient_lowercase(choices)
- overlap = BOOLEANS_TRUE.intersection(choices)
- if len(overlap) == 1:
- (param[k],) = overlap
-
- if param[k] not in choices:
- choices_str = ", ".join([to_native(c) for c in choices])
- msg = "value of %s must be one of: %s, got: %s" % (k, choices_str, param[k])
- if self._options_context:
- msg += " found in %s" % " -> ".join(self._options_context)
- self.fail_json(msg=msg)
- else:
- msg = "internal error: choices for argument %s are not iterable: %s" % (k, choices)
- if self._options_context:
- msg += " found in %s" % " -> ".join(self._options_context)
- self.fail_json(msg=msg)
-
def safe_eval(self, value, locals=None, include_exceptions=False):
return safe_eval(value, locals, include_exceptions)
- def _check_type_str(self, value, param=None, prefix=''):
- opts = {
- 'error': False,
- 'warn': False,
- 'ignore': True
- }
-
- # Ignore, warn, or error when converting to a string.
- allow_conversion = opts.get(self._string_conversion_action, True)
- try:
- return check_type_str(value, allow_conversion)
- except TypeError:
- common_msg = 'quote the entire value to ensure it does not change.'
- from_msg = '{0!r}'.format(value)
- to_msg = '{0!r}'.format(to_text(value))
-
- if param is not None:
- if prefix:
- param = '{0}{1}'.format(prefix, param)
-
- from_msg = '{0}: {1!r}'.format(param, value)
- to_msg = '{0}: {1!r}'.format(param, to_text(value))
-
- if self._string_conversion_action == 'error':
- msg = common_msg.capitalize()
- raise TypeError(to_native(msg))
- elif self._string_conversion_action == 'warn':
- msg = ('The value "{0}" (type {1.__class__.__name__}) was converted to "{2}" (type string). '
- 'If this does not look like what you expect, {3}').format(from_msg, value, to_msg, common_msg)
- self.warn(to_native(msg))
- return to_native(value, errors='surrogate_or_strict')
-
- def _check_type_list(self, value):
- return check_type_list(value)
-
- def _check_type_dict(self, value):
- return check_type_dict(value)
-
- def _check_type_bool(self, value):
- return check_type_bool(value)
-
- def _check_type_int(self, value):
- return check_type_int(value)
-
- def _check_type_float(self, value):
- return check_type_float(value)
-
- def _check_type_path(self, value):
- return check_type_path(value)
-
- def _check_type_jsonarg(self, value):
- return check_type_jsonarg(value)
-
- def _check_type_raw(self, value):
- return check_type_raw(value)
-
- def _check_type_bytes(self, value):
- return check_type_bytes(value)
-
- def _check_type_bits(self, value):
- return check_type_bits(value)
-
- def _handle_options(self, argument_spec=None, params=None, prefix=''):
- ''' deal with options to create sub spec '''
- if argument_spec is None:
- argument_spec = self.argument_spec
- if params is None:
- params = self.params
-
- for (k, v) in argument_spec.items():
- wanted = v.get('type', None)
- if wanted == 'dict' or (wanted == 'list' and v.get('elements', '') == 'dict'):
- spec = v.get('options', None)
- if v.get('apply_defaults', False):
- if spec is not None:
- if params.get(k) is None:
- params[k] = {}
- else:
- continue
- elif spec is None or k not in params or params[k] is None:
- continue
-
- self._options_context.append(k)
-
- if isinstance(params[k], dict):
- elements = [params[k]]
- else:
- elements = params[k]
-
- for idx, param in enumerate(elements):
- if not isinstance(param, dict):
- self.fail_json(msg="value of %s must be of type dict or list of dict" % k)
-
- new_prefix = prefix + k
- if wanted == 'list':
- new_prefix += '[%d]' % idx
- new_prefix += '.'
-
- self._set_fallbacks(spec, param)
- options_aliases = self._handle_aliases(spec, param, option_prefix=new_prefix)
-
- options_legal_inputs = list(spec.keys()) + list(options_aliases.keys())
-
- self._check_arguments(spec, param, options_legal_inputs)
-
- # check exclusive early
- if not self.bypass_checks:
- self._check_mutually_exclusive(v.get('mutually_exclusive', None), param)
-
- self._set_defaults(pre=True, spec=spec, param=param)
-
- if not self.bypass_checks:
- self._check_required_arguments(spec, param)
- self._check_argument_types(spec, param, new_prefix)
- self._check_argument_values(spec, param)
-
- self._check_required_together(v.get('required_together', None), param)
- self._check_required_one_of(v.get('required_one_of', None), param)
- self._check_required_if(v.get('required_if', None), param)
- self._check_required_by(v.get('required_by', None), param)
-
- self._set_defaults(pre=False, spec=spec, param=param)
-
- # handle multi level options (sub argspec)
- self._handle_options(spec, param, new_prefix)
- self._options_context.pop()
-
- def _get_wanted_type(self, wanted, k):
- if not callable(wanted):
- if wanted is None:
- # Mostly we want to default to str.
- # For values set to None explicitly, return None instead as
- # that allows a user to unset a parameter
- wanted = 'str'
- try:
- type_checker = self._CHECK_ARGUMENT_TYPES_DISPATCHER[wanted]
- except KeyError:
- self.fail_json(msg="implementation error: unknown type %s requested for %s" % (wanted, k))
- else:
- # set the type_checker to the callable, and reset wanted to the callable's name (or type if it doesn't have one, ala MagicMock)
- type_checker = wanted
- wanted = getattr(wanted, '__name__', to_native(type(wanted)))
-
- return type_checker, wanted
-
- def _handle_elements(self, wanted, param, values):
- type_checker, wanted_name = self._get_wanted_type(wanted, param)
- validated_params = []
- # Get param name for strings so we can later display this value in a useful error message if needed
- # Only pass 'kwargs' to our checkers and ignore custom callable checkers
- kwargs = {}
- if wanted_name == 'str' and isinstance(wanted, string_types):
- if isinstance(param, string_types):
- kwargs['param'] = param
- elif isinstance(param, dict):
- kwargs['param'] = list(param.keys())[0]
- for value in values:
- try:
- validated_params.append(type_checker(value, **kwargs))
- except (TypeError, ValueError) as e:
- msg = "Elements value for option %s" % param
- if self._options_context:
- msg += " found in '%s'" % " -> ".join(self._options_context)
- msg += " is of type %s and we were unable to convert to %s: %s" % (type(value), wanted_name, to_native(e))
- self.fail_json(msg=msg)
- return validated_params
-
- def _check_argument_types(self, spec=None, param=None, prefix=''):
- ''' ensure all arguments have the requested type '''
-
- if spec is None:
- spec = self.argument_spec
- if param is None:
- param = self.params
-
- for (k, v) in spec.items():
- wanted = v.get('type', None)
- if k not in param:
- continue
-
- value = param[k]
- if value is None:
- continue
-
- type_checker, wanted_name = self._get_wanted_type(wanted, k)
- # Get param name for strings so we can later display this value in a useful error message if needed
- # Only pass 'kwargs' to our checkers and ignore custom callable checkers
- kwargs = {}
- if wanted_name == 'str' and isinstance(type_checker, string_types):
- kwargs['param'] = list(param.keys())[0]
-
- # Get the name of the parent key if this is a nested option
- if prefix:
- kwargs['prefix'] = prefix
-
- try:
- param[k] = type_checker(value, **kwargs)
- wanted_elements = v.get('elements', None)
- if wanted_elements:
- if wanted != 'list' or not isinstance(param[k], list):
- msg = "Invalid type %s for option '%s'" % (wanted_name, param)
- if self._options_context:
- msg += " found in '%s'." % " -> ".join(self._options_context)
- msg += ", elements value check is supported only with 'list' type"
- self.fail_json(msg=msg)
- param[k] = self._handle_elements(wanted_elements, k, param[k])
-
- except (TypeError, ValueError) as e:
- msg = "argument %s is of type %s" % (k, type(value))
- if self._options_context:
- msg += " found in '%s'." % " -> ".join(self._options_context)
- msg += " and we were unable to convert to %s: %s" % (wanted_name, to_native(e))
- self.fail_json(msg=msg)
-
- def _set_defaults(self, pre=True, spec=None, param=None):
- if spec is None:
- spec = self.argument_spec
- if param is None:
- param = self.params
- for (k, v) in spec.items():
- default = v.get('default', None)
-
- # This prevents setting defaults on required items on the 1st run,
- # otherwise will set things without a default to None on the 2nd.
- if k not in param and (default is not None or not pre):
- # Make sure any default value for no_log fields are masked.
- if v.get('no_log', False) and default:
- self.no_log_values.add(default)
-
- param[k] = default
-
- def _set_fallbacks(self, spec=None, param=None):
- if spec is None:
- spec = self.argument_spec
- if param is None:
- param = self.params
-
- for (k, v) in spec.items():
- fallback = v.get('fallback', (None,))
- fallback_strategy = fallback[0]
- fallback_args = []
- fallback_kwargs = {}
- if k not in param and fallback_strategy is not None:
- for item in fallback[1:]:
- if isinstance(item, dict):
- fallback_kwargs = item
- else:
- fallback_args = item
- try:
- fallback_value = fallback_strategy(*fallback_args, **fallback_kwargs)
- except AnsibleFallbackNotFound:
- continue
- else:
- if v.get('no_log', False) and fallback_value:
- self.no_log_values.add(fallback_value)
- param[k] = fallback_value
-
def _load_params(self):
''' read the input and set the params attribute.
@@ -1992,10 +1281,19 @@ class AnsibleModule(object):
def _log_to_syslog(self, msg):
if HAS_SYSLOG:
- module = 'ansible-%s' % self._name
- facility = getattr(syslog, self._syslog_facility, syslog.LOG_USER)
- syslog.openlog(str(module), 0, facility)
- syslog.syslog(syslog.LOG_INFO, msg)
+ try:
+ module = 'ansible-%s' % self._name
+ facility = getattr(syslog, self._syslog_facility, syslog.LOG_USER)
+ syslog.openlog(str(module), 0, facility)
+ syslog.syslog(syslog.LOG_INFO, msg)
+ except TypeError as e:
+ self.fail_json(
+ msg='Failed to log to syslog (%s). To proceed anyway, '
+ 'disable syslog logging by setting no_target_syslog '
+ 'to True in your Ansible config.' % to_native(e),
+ exception=traceback.format_exc(),
+ msg_to_log=msg,
+ )
def debug(self, msg):
if self._debug:
@@ -2156,7 +1454,6 @@ class AnsibleModule(object):
def _return_formatted(self, kwargs):
- self.add_atomic_move_warnings()
self.add_path_info(kwargs)
if 'invocation' not in kwargs:
@@ -2333,7 +1630,7 @@ class AnsibleModule(object):
raise
# Set the attributes
- current_attribs = self.get_file_attributes(src)
+ current_attribs = self.get_file_attributes(src, include_version=False)
current_attribs = current_attribs.get('attr_flags', '')
self.set_attributes_if_different(dest, current_attribs, True)
@@ -2451,16 +1748,6 @@ class AnsibleModule(object):
self.cleanup(b_tmp_dest_name)
if creating:
- # Keep track of what files we create here with default permissions so later we can see if the permissions
- # are explicitly set with a follow up call to set_mode_if_different().
- #
- # Only warn if the module accepts 'mode' parameter so the user can take action.
- # If the module does not allow the user to set 'mode', then the warning is useless to the
- # user since it provides no actionable information.
- #
- if self.argument_spec.get('mode') and self.params.get('mode') is None:
- self._created_files.add(dest)
-
# make sure the file has the correct permissions
# based on the current value of umask
umask = os.umask(0)
@@ -2665,8 +1952,12 @@ class AnsibleModule(object):
old_env_vals[key] = os.environ.get(key, None)
os.environ[key] = val
if path_prefix:
- old_env_vals['PATH'] = os.environ['PATH']
- os.environ['PATH'] = "%s:%s" % (path_prefix, os.environ['PATH'])
+ path = os.environ.get('PATH', '')
+ old_env_vals['PATH'] = path
+ if path:
+ os.environ['PATH'] = "%s:%s" % (path_prefix, path)
+ else:
+ os.environ['PATH'] = path_prefix
# If using test-module.py and explode, the remote lib path will resemble:
# /tmp/test_module_scratch/debug_dir/ansible/module_utils/basic.py
@@ -2734,7 +2025,7 @@ class AnsibleModule(object):
stderr = b''
try:
selector = selectors.DefaultSelector()
- except OSError:
+ except (IOError, OSError):
# Failed to detect default selector for the given platform
# Select PollSelector which is supported by major platforms
selector = selectors.PollSelector()
@@ -2790,10 +2081,10 @@ class AnsibleModule(object):
rc = cmd.returncode
except (OSError, IOError) as e:
self.log("Error Executing CMD:%s Exception:%s" % (self._clean_args(args), to_native(e)))
- self.fail_json(rc=e.errno, msg=to_native(e), cmd=self._clean_args(args))
+ self.fail_json(rc=e.errno, stdout=b'', stderr=b'', msg=to_native(e), cmd=self._clean_args(args))
except Exception as e:
self.log("Error Executing CMD:%s Exception:%s" % (self._clean_args(args), to_native(traceback.format_exc())))
- self.fail_json(rc=257, msg=to_native(e), exception=traceback.format_exc(), cmd=self._clean_args(args))
+ self.fail_json(rc=257, stdout=b'', stderr=b'', msg=to_native(e), exception=traceback.format_exc(), cmd=self._clean_args(args))
# Restore env settings
for key, val in old_env_vals.items():
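Alongside the validator refactor, env_fallback now lives in module_utils.common.parameters (it remains importable through basic.py), and unsafe_writes gains a fallback to the ANSIBLE_UNSAFE_WRITES environment variable for any module that passes add_file_common_args=True. A small sketch of the fallback helper itself, assuming this patch is applied (the variable names are illustrative):

    import os

    from ansible.module_utils.common.parameters import env_fallback
    from ansible.module_utils.errors import AnsibleFallbackNotFound

    os.environ['ANSIBLE_UNSAFE_WRITES'] = 'true'
    print(env_fallback('ANSIBLE_UNSAFE_WRITES'))  # -> 'true'

    try:
        env_fallback('SOME_UNSET_VARIABLE')
    except AnsibleFallbackNotFound:
        # a fallback that finds nothing leaves the parameter unset, so spec defaults still apply
        pass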
diff --git a/lib/ansible/module_utils/common/arg_spec.py b/lib/ansible/module_utils/common/arg_spec.py
new file mode 100644
index 00000000..781f6948
--- /dev/null
+++ b/lib/ansible/module_utils/common/arg_spec.py
@@ -0,0 +1,286 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2021 Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from copy import deepcopy
+
+from ansible.module_utils.common.parameters import (
+ _ADDITIONAL_CHECKS,
+ _get_legal_inputs,
+ _get_unsupported_parameters,
+ _handle_aliases,
+ _list_no_log_values,
+ _set_defaults,
+ _validate_argument_types,
+ _validate_argument_values,
+ _validate_sub_spec,
+ set_fallbacks,
+)
+
+from ansible.module_utils.common.text.converters import to_native
+from ansible.module_utils.common.warnings import deprecate, warn
+
+from ansible.module_utils.common.validation import (
+ check_mutually_exclusive,
+ check_required_arguments,
+ check_required_by,
+ check_required_if,
+ check_required_one_of,
+ check_required_together,
+)
+
+from ansible.module_utils.errors import (
+ AliasError,
+ AnsibleValidationErrorMultiple,
+ MutuallyExclusiveError,
+ NoLogError,
+ RequiredByError,
+ RequiredDefaultError,
+ RequiredError,
+ RequiredIfError,
+ RequiredOneOfError,
+ RequiredTogetherError,
+ UnsupportedError,
+)
+
+
+class ValidationResult:
+ """Result of argument spec validation.
+
+ This is the object returned by :func:`ArgumentSpecValidator.validate()
+ <ansible.module_utils.common.arg_spec.ArgumentSpecValidator.validate()>`
+ containing the validated parameters and any errors.
+ """
+
+ def __init__(self, parameters):
+ """
+ :arg parameters: Terms to be validated and coerced to the correct type.
+ :type parameters: dict
+ """
+ self._no_log_values = set()
+ """:class:`set` of values marked as ``no_log`` in the argument spec. This
+ is a temporary holding place for these values and may move in the future.
+ """
+
+ self._unsupported_parameters = set()
+ self._validated_parameters = deepcopy(parameters)
+ self._deprecations = []
+ self._warnings = []
+ self.errors = AnsibleValidationErrorMultiple()
+ """
+ :class:`~ansible.module_utils.errors.AnsibleValidationErrorMultiple` containing all
+ :class:`~ansible.module_utils.errors.AnsibleValidationError` objects if there were
+ any failures during validation.
+ """
+
+ @property
+ def validated_parameters(self):
+ """Validated and coerced parameters."""
+ return self._validated_parameters
+
+ @property
+ def unsupported_parameters(self):
+ """:class:`set` of unsupported parameter names."""
+ return self._unsupported_parameters
+
+ @property
+ def error_messages(self):
+ """:class:`list` of all error messages from each exception in :attr:`errors`."""
+ return self.errors.messages
+
+
+class ArgumentSpecValidator:
+ """Argument spec validation class
+
+ Creates a validator based on the ``argument_spec`` that can be used to
+ validate a number of parameters using the :meth:`validate` method.
+ """
+
+ def __init__(self, argument_spec,
+ mutually_exclusive=None,
+ required_together=None,
+ required_one_of=None,
+ required_if=None,
+ required_by=None,
+ ):
+
+ """
+ :arg argument_spec: Specification of valid parameters and their type. May
+ include nested argument specs.
+ :type argument_spec: dict[str, dict]
+
+ :kwarg mutually_exclusive: List or list of lists of terms that should not
+ be provided together.
+ :type mutually_exclusive: list[str] or list[list[str]]
+
+ :kwarg required_together: List of lists of terms that are required together.
+ :type required_together: list[list[str]]
+
+ :kwarg required_one_of: List of lists of terms, one of which in each list
+ is required.
+ :type required_one_of: list[list[str]]
+
+ :kwarg required_if: List of lists of ``[parameter, value, [parameters]]`` where
+ one of ``[parameters]`` is required if ``parameter == value``.
+ :type required_if: list
+
+ :kwarg required_by: Dictionary of parameter names that contain a list of
+ parameters required by each key in the dictionary.
+ :type required_by: dict[str, list[str]]
+ """
+
+ self._mutually_exclusive = mutually_exclusive
+ self._required_together = required_together
+ self._required_one_of = required_one_of
+ self._required_if = required_if
+ self._required_by = required_by
+ self._valid_parameter_names = set()
+ self.argument_spec = argument_spec
+
+ for key in sorted(self.argument_spec.keys()):
+ aliases = self.argument_spec[key].get('aliases')
+ if aliases:
+ self._valid_parameter_names.update(["{key} ({aliases})".format(key=key, aliases=", ".join(sorted(aliases)))])
+ else:
+ self._valid_parameter_names.update([key])
+
+ def validate(self, parameters, *args, **kwargs):
+ """Validate ``parameters`` against argument spec.
+
+ Error messages in the :class:`ValidationResult` may contain no_log values and should be
+ sanitized with :func:`~ansible.module_utils.common.parameters.sanitize_keys` before logging or displaying.
+
+ :arg parameters: Parameters to validate against the argument spec
+ :type parameters: dict[str, dict]
+
+ :return: :class:`ValidationResult` containing validated parameters.
+
+ :Simple Example:
+
+ .. code-block:: text
+
+ argument_spec = {
+ 'name': {'type': 'str'},
+ 'age': {'type': 'int'},
+ }
+
+ parameters = {
+ 'name': 'bo',
+ 'age': '42',
+ }
+
+ validator = ArgumentSpecValidator(argument_spec)
+ result = validator.validate(parameters)
+
+ if result.error_messages:
+                sys.exit("Validation failed: {0}".format(", ".join(result.error_messages)))
+
+ valid_params = result.validated_parameters
+ """
+
+ result = ValidationResult(parameters)
+
+ result._no_log_values.update(set_fallbacks(self.argument_spec, result._validated_parameters))
+
+ alias_warnings = []
+ alias_deprecations = []
+ try:
+ aliases = _handle_aliases(self.argument_spec, result._validated_parameters, alias_warnings, alias_deprecations)
+ except (TypeError, ValueError) as e:
+ aliases = {}
+ result.errors.append(AliasError(to_native(e)))
+
+ legal_inputs = _get_legal_inputs(self.argument_spec, result._validated_parameters, aliases)
+
+ for option, alias in alias_warnings:
+ result._warnings.append({'option': option, 'alias': alias})
+
+ for deprecation in alias_deprecations:
+ result._deprecations.append({
+ 'name': deprecation['name'],
+ 'version': deprecation.get('version'),
+ 'date': deprecation.get('date'),
+ 'collection_name': deprecation.get('collection_name'),
+ })
+
+ try:
+ result._no_log_values.update(_list_no_log_values(self.argument_spec, result._validated_parameters))
+ except TypeError as te:
+ result.errors.append(NoLogError(to_native(te)))
+
+ try:
+ result._unsupported_parameters.update(_get_unsupported_parameters(self.argument_spec, result._validated_parameters, legal_inputs))
+ except TypeError as te:
+ result.errors.append(RequiredDefaultError(to_native(te)))
+ except ValueError as ve:
+ result.errors.append(AliasError(to_native(ve)))
+
+ try:
+ check_mutually_exclusive(self._mutually_exclusive, result._validated_parameters)
+ except TypeError as te:
+ result.errors.append(MutuallyExclusiveError(to_native(te)))
+
+ result._no_log_values.update(_set_defaults(self.argument_spec, result._validated_parameters, False))
+
+ try:
+ check_required_arguments(self.argument_spec, result._validated_parameters)
+ except TypeError as e:
+ result.errors.append(RequiredError(to_native(e)))
+
+ _validate_argument_types(self.argument_spec, result._validated_parameters, errors=result.errors)
+ _validate_argument_values(self.argument_spec, result._validated_parameters, errors=result.errors)
+
+ for check in _ADDITIONAL_CHECKS:
+ try:
+ check['func'](getattr(self, "_{attr}".format(attr=check['attr'])), result._validated_parameters)
+ except TypeError as te:
+ result.errors.append(check['err'](to_native(te)))
+
+ result._no_log_values.update(_set_defaults(self.argument_spec, result._validated_parameters))
+
+ _validate_sub_spec(self.argument_spec, result._validated_parameters,
+ errors=result.errors,
+ no_log_values=result._no_log_values,
+ unsupported_parameters=result._unsupported_parameters)
+
+ if result._unsupported_parameters:
+ flattened_names = []
+ for item in result._unsupported_parameters:
+ if isinstance(item, tuple):
+ flattened_names.append(".".join(item))
+ else:
+ flattened_names.append(item)
+
+ unsupported_string = ", ".join(sorted(list(flattened_names)))
+ supported_string = ", ".join(self._valid_parameter_names)
+ result.errors.append(
+ UnsupportedError("{0}. Supported parameters include: {1}.".format(unsupported_string, supported_string)))
+
+ return result
+
+
+class ModuleArgumentSpecValidator(ArgumentSpecValidator):
+ """Argument spec validation class used by :class:`AnsibleModule`.
+
+ This is not meant to be used outside of :class:`AnsibleModule`. Use
+ :class:`ArgumentSpecValidator` instead.
+ """
+
+ def __init__(self, *args, **kwargs):
+ super(ModuleArgumentSpecValidator, self).__init__(*args, **kwargs)
+
+ def validate(self, parameters):
+ result = super(ModuleArgumentSpecValidator, self).validate(parameters)
+
+ for d in result._deprecations:
+ deprecate("Alias '{name}' is deprecated. See the module docs for more information".format(name=d['name']),
+ version=d.get('version'), date=d.get('date'),
+ collection_name=d.get('collection_name'))
+
+ for w in result._warnings:
+ warn('Both option {option} and its alias {alias} are set.'.format(option=w['option'], alias=w['alias']))
+
+ return result
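For reference, a minimal sketch of using the new validator outside of AnsibleModule (the spec and parameters are made up for illustration). Errors are collected on the result rather than raised, which is how the reworked AnsibleModule constructor consumes it before calling fail_json() with the first collected error:

    from ansible.module_utils.common.arg_spec import ArgumentSpecValidator

    validator = ArgumentSpecValidator({
        'state': {'type': 'str', 'choices': ['present', 'absent'], 'default': 'present'},
        'timeout': {'type': 'int'},
    })
    result = validator.validate({'timeout': '30', 'bogus': True})

    print(result.validated_parameters)    # 'timeout' coerced to int, 'state' filled from its default
    print(result.unsupported_parameters)  # {'bogus'}
    print(result.error_messages)          # message from the collected UnsupportedError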
diff --git a/lib/ansible/module_utils/common/parameters.py b/lib/ansible/module_utils/common/parameters.py
index 4cf631e1..2624bb50 100644
--- a/lib/ansible/module_utils/common/parameters.py
+++ b/lib/ansible/module_utils/common/parameters.py
@@ -5,21 +5,83 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-from ansible.module_utils._text import to_native
-from ansible.module_utils.common._collections_compat import Mapping
+import datetime
+import os
+
+from collections import deque
+from itertools import chain
+
from ansible.module_utils.common.collections import is_iterable
-from ansible.module_utils.common.validation import check_type_dict
+from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
+from ansible.module_utils.common.text.formatters import lenient_lowercase
+from ansible.module_utils.common.warnings import warn
+from ansible.module_utils.errors import (
+ AliasError,
+ AnsibleFallbackNotFound,
+ AnsibleValidationErrorMultiple,
+ ArgumentTypeError,
+ ArgumentValueError,
+ ElementError,
+ MutuallyExclusiveError,
+ NoLogError,
+ RequiredByError,
+ RequiredError,
+ RequiredIfError,
+ RequiredOneOfError,
+ RequiredTogetherError,
+ SubParameterTypeError,
+)
+from ansible.module_utils.parsing.convert_bool import BOOLEANS_FALSE, BOOLEANS_TRUE
+
+from ansible.module_utils.common._collections_compat import (
+ KeysView,
+ Set,
+ Sequence,
+ Mapping,
+ MutableMapping,
+ MutableSet,
+ MutableSequence,
+)
from ansible.module_utils.six import (
binary_type,
integer_types,
string_types,
text_type,
+ PY2,
+ PY3,
+)
+
+from ansible.module_utils.common.validation import (
+ check_mutually_exclusive,
+ check_required_arguments,
+ check_required_together,
+ check_required_one_of,
+ check_required_if,
+ check_required_by,
+ check_type_bits,
+ check_type_bool,
+ check_type_bytes,
+ check_type_dict,
+ check_type_float,
+ check_type_int,
+ check_type_jsonarg,
+ check_type_list,
+ check_type_path,
+ check_type_raw,
+ check_type_str,
)
# Python2 & 3 way to get NoneType
NoneType = type(None)
+_ADDITIONAL_CHECKS = (
+ {'func': check_required_together, 'attr': 'required_together', 'err': RequiredTogetherError},
+ {'func': check_required_one_of, 'attr': 'required_one_of', 'err': RequiredOneOfError},
+ {'func': check_required_if, 'attr': 'required_if', 'err': RequiredIfError},
+ {'func': check_required_by, 'attr': 'required_by', 'err': RequiredByError},
+)
+
# if adding boolean attribute, also add to PASS_BOOL
# some of this dupes defaults from controller config
PASS_VARS = {
@@ -42,97 +104,168 @@ PASS_VARS = {
PASS_BOOLS = ('check_mode', 'debug', 'diff', 'keep_remote_files', 'no_log')
+DEFAULT_TYPE_VALIDATORS = {
+ 'str': check_type_str,
+ 'list': check_type_list,
+ 'dict': check_type_dict,
+ 'bool': check_type_bool,
+ 'int': check_type_int,
+ 'float': check_type_float,
+ 'path': check_type_path,
+ 'raw': check_type_raw,
+ 'jsonarg': check_type_jsonarg,
+ 'json': check_type_jsonarg,
+ 'bytes': check_type_bytes,
+ 'bits': check_type_bits,
+}
-def _return_datastructure_name(obj):
- """ Return native stringified values from datastructures.
- For use with removing sensitive values pre-jsonification."""
- if isinstance(obj, (text_type, binary_type)):
- if obj:
- yield to_native(obj, errors='surrogate_or_strict')
- return
- elif isinstance(obj, Mapping):
- for element in obj.items():
- for subelement in _return_datastructure_name(element[1]):
- yield subelement
- elif is_iterable(obj):
- for element in obj:
- for subelement in _return_datastructure_name(element):
- yield subelement
- elif isinstance(obj, (bool, NoneType)):
- # This must come before int because bools are also ints
- return
- elif isinstance(obj, tuple(list(integer_types) + [float])):
- yield to_native(obj, nonstring='simplerepr')
+def _get_type_validator(wanted):
+ """Returns the callable used to validate a wanted type and the type name.
+
+ :arg wanted: String or callable. If a string, get the corresponding
+ validation function from DEFAULT_TYPE_VALIDATORS. If callable,
+ get the name of the custom callable and return that for the type_checker.
+
+ :returns: Tuple of callable function or None, and a string that is the name
+ of the wanted type.
+ """
+
+ # Use one of our builtin validators.
+ if not callable(wanted):
+ if wanted is None:
+ # Default type for parameters
+ wanted = 'str'
+
+ type_checker = DEFAULT_TYPE_VALIDATORS.get(wanted)
+
+ # Use the custom callable for validation.
else:
- raise TypeError('Unknown parameter type: %s' % (type(obj)))
+ type_checker = wanted
+ wanted = getattr(wanted, '__name__', to_native(type(wanted)))
+ return type_checker, wanted
-def list_no_log_values(argument_spec, params):
- """Return set of no log values
- :arg argument_spec: An argument spec dictionary from a module
- :arg params: Dictionary of all module parameters
+def _get_legal_inputs(argument_spec, parameters, aliases=None):
+ if aliases is None:
+ aliases = _handle_aliases(argument_spec, parameters)
+
+ return list(aliases.keys()) + list(argument_spec.keys())
+
- :returns: Set of strings that should be hidden from output::
+def _get_unsupported_parameters(argument_spec, parameters, legal_inputs=None, options_context=None):
+ """Check keys in parameters against those provided in legal_inputs
+ to ensure they are legal parameter names. If legal_inputs are not supplied,
+ they will be generated using the argument_spec.
- {'secret_dict_value', 'secret_list_item_one', 'secret_list_item_two', 'secret_string'}
+ :arg argument_spec: Dictionary of parameters, their type, and valid values.
+ :arg parameters: Dictionary of parameters.
+ :arg legal_inputs: List of valid key names. Overrides values
+ in argument_spec.
+ :arg options_context: List of parent keys for tracking the context of where
+ a parameter is defined.
+
+ :returns: Set of unsupported parameters. Empty set if no unsupported parameters
+ are found.
"""
- no_log_values = set()
- for arg_name, arg_opts in argument_spec.items():
- if arg_opts.get('no_log', False):
- # Find the value for the no_log'd param
- no_log_object = params.get(arg_name, None)
+ if legal_inputs is None:
+ legal_inputs = _get_legal_inputs(argument_spec, parameters)
- if no_log_object:
- try:
- no_log_values.update(_return_datastructure_name(no_log_object))
- except TypeError as e:
- raise TypeError('Failed to convert "%s": %s' % (arg_name, to_native(e)))
+ unsupported_parameters = set()
+ for k in parameters.keys():
+ if k not in legal_inputs:
+ context = k
+ if options_context:
+ context = tuple(options_context + [k])
- # Get no_log values from suboptions
- sub_argument_spec = arg_opts.get('options')
- if sub_argument_spec is not None:
- wanted_type = arg_opts.get('type')
- sub_parameters = params.get(arg_name)
+ unsupported_parameters.add(context)
- if sub_parameters is not None:
- if wanted_type == 'dict' or (wanted_type == 'list' and arg_opts.get('elements', '') == 'dict'):
- # Sub parameters can be a dict or list of dicts. Ensure parameters are always a list.
- if not isinstance(sub_parameters, list):
- sub_parameters = [sub_parameters]
+ return unsupported_parameters
- for sub_param in sub_parameters:
- # Validate dict fields in case they came in as strings
- if isinstance(sub_param, string_types):
- sub_param = check_type_dict(sub_param)
+def _handle_aliases(argument_spec, parameters, alias_warnings=None, alias_deprecations=None):
+ """Process aliases from an argument_spec including warnings and deprecations.
- if not isinstance(sub_param, Mapping):
- raise TypeError("Value '{1}' in the sub parameter field '{0}' must by a {2}, "
- "not '{1.__class__.__name__}'".format(arg_name, sub_param, wanted_type))
+ Modify ``parameters`` by adding a new key for each alias with the supplied
+ value from ``parameters``.
- no_log_values.update(list_no_log_values(sub_argument_spec, sub_param))
+ If a list is provided to the alias_warnings parameter, it will be filled with tuples
+ (option, alias) in every case where both an option and its alias are specified.
- return no_log_values
+ If a list is provided to alias_deprecations, it will be populated with dictionaries,
+ each containing deprecation information for each alias found in argument_spec.
+
+ :param argument_spec: Dictionary of parameters, their type, and valid values.
+ :type argument_spec: dict
+
+ :param parameters: Dictionary of parameters.
+ :type parameters: dict
+
+ :param alias_warnings:
+ :type alias_warnings: list
+
+ :param alias_deprecations:
+ :type alias_deprecations: list
+ """
+
+ aliases_results = {} # alias:canon
+
+ for (k, v) in argument_spec.items():
+ aliases = v.get('aliases', None)
+ default = v.get('default', None)
+ required = v.get('required', False)
+
+ if alias_deprecations is not None:
+ for alias in argument_spec[k].get('deprecated_aliases', []):
+ if alias.get('name') in parameters:
+ alias_deprecations.append(alias)
+
+ if default is not None and required:
+ # not alias specific but this is a good place to check this
+ raise ValueError("internal error: required and default are mutually exclusive for %s" % k)
+ if aliases is None:
+ continue
-def list_deprecations(argument_spec, params, prefix=''):
+ if not is_iterable(aliases) or isinstance(aliases, (binary_type, text_type)):
+ raise TypeError('internal error: aliases must be a list or tuple')
+
+ for alias in aliases:
+ aliases_results[alias] = k
+ if alias in parameters:
+ if k in parameters and alias_warnings is not None:
+ alias_warnings.append((k, alias))
+ parameters[k] = parameters[alias]
+
+ return aliases_results
+
+
+def _list_deprecations(argument_spec, parameters, prefix=''):
"""Return a list of deprecations
- :arg argument_spec: An argument spec dictionary from a module
- :arg params: Dictionary of all module parameters
+ :arg argument_spec: An argument spec dictionary
+ :arg parameters: Dictionary of parameters
:returns: List of dictionaries containing a message and version in which
- the deprecated parameter will be removed, or an empty list::
+ the deprecated parameter will be removed, or an empty list.
+
+ :Example return:
- [{'msg': "Param 'deptest' is deprecated. See the module docs for more information", 'version': '2.9'}]
+ .. code-block:: python
+
+ [
+ {
+ 'msg': "Param 'deptest' is deprecated. See the module docs for more information",
+ 'version': '2.9'
+ }
+ ]
"""
deprecations = []
for arg_name, arg_opts in argument_spec.items():
- if arg_name in params:
+ if arg_name in parameters:
if prefix:
sub_prefix = '%s["%s"]' % (prefix, arg_name)
else:
@@ -152,46 +285,617 @@ def list_deprecations(argument_spec, params, prefix=''):
# Check sub-argument spec
sub_argument_spec = arg_opts.get('options')
if sub_argument_spec is not None:
- sub_arguments = params[arg_name]
+ sub_arguments = parameters[arg_name]
if isinstance(sub_arguments, Mapping):
sub_arguments = [sub_arguments]
if isinstance(sub_arguments, list):
for sub_params in sub_arguments:
if isinstance(sub_params, Mapping):
- deprecations.extend(list_deprecations(sub_argument_spec, sub_params, prefix=sub_prefix))
+ deprecations.extend(_list_deprecations(sub_argument_spec, sub_params, prefix=sub_prefix))
return deprecations
-def handle_aliases(argument_spec, params, alias_warnings=None):
- """Return a two item tuple. The first is a dictionary of aliases, the second is
- a list of legal inputs.
+def _list_no_log_values(argument_spec, params):
+ """Return set of no log values
- If a list is provided to the alias_warnings parameter, it will be filled with tuples
- (option, alias) in every case where both an option and its alias are specified.
+ :arg argument_spec: An argument spec dictionary
+ :arg params: Dictionary of all parameters
+
+ :returns: :class:`set` of strings that should be hidden from output.
"""
- legal_inputs = ['_ansible_%s' % k for k in PASS_VARS]
- aliases_results = {} # alias:canon
+ no_log_values = set()
+ for arg_name, arg_opts in argument_spec.items():
+ if arg_opts.get('no_log', False):
+ # Find the value for the no_log'd param
+ no_log_object = params.get(arg_name, None)
- for (k, v) in argument_spec.items():
- legal_inputs.append(k)
- aliases = v.get('aliases', None)
- default = v.get('default', None)
- required = v.get('required', False)
- if default is not None and required:
- # not alias specific but this is a good place to check this
- raise ValueError("internal error: required and default are mutually exclusive for %s" % k)
- if aliases is None:
+ if no_log_object:
+ try:
+ no_log_values.update(_return_datastructure_name(no_log_object))
+ except TypeError as e:
+ raise TypeError('Failed to convert "%s": %s' % (arg_name, to_native(e)))
+
+ # Get no_log values from suboptions
+ sub_argument_spec = arg_opts.get('options')
+ if sub_argument_spec is not None:
+ wanted_type = arg_opts.get('type')
+ sub_parameters = params.get(arg_name)
+
+ if sub_parameters is not None:
+ if wanted_type == 'dict' or (wanted_type == 'list' and arg_opts.get('elements', '') == 'dict'):
+ # Sub parameters can be a dict or list of dicts. Ensure parameters are always a list.
+ if not isinstance(sub_parameters, list):
+ sub_parameters = [sub_parameters]
+
+ for sub_param in sub_parameters:
+ # Validate dict fields in case they came in as strings
+
+ if isinstance(sub_param, string_types):
+ sub_param = check_type_dict(sub_param)
+
+ if not isinstance(sub_param, Mapping):
+ raise TypeError("Value '{1}' in the sub parameter field '{0}' must be a {2}, "
+ "not '{1.__class__.__name__}'".format(arg_name, sub_param, wanted_type))
+
+ no_log_values.update(_list_no_log_values(sub_argument_spec, sub_param))
+
+ return no_log_values
+
+
+def _return_datastructure_name(obj):
+ """ Return native stringified values from datastructures.
+
+ For use with removing sensitive values pre-jsonification."""
+ if isinstance(obj, (text_type, binary_type)):
+ if obj:
+ yield to_native(obj, errors='surrogate_or_strict')
+ return
+ elif isinstance(obj, Mapping):
+ for element in obj.items():
+ for subelement in _return_datastructure_name(element[1]):
+ yield subelement
+ elif is_iterable(obj):
+ for element in obj:
+ for subelement in _return_datastructure_name(element):
+ yield subelement
+ elif isinstance(obj, (bool, NoneType)):
+ # This must come before int because bools are also ints
+ return
+ elif isinstance(obj, tuple(list(integer_types) + [float])):
+ yield to_native(obj, nonstring='simplerepr')
+ else:
+ raise TypeError('Unknown parameter type: %s' % (type(obj)))
+
+
+def _remove_values_conditions(value, no_log_strings, deferred_removals):
+ """
+ Helper function for :meth:`remove_values`.
+
+ :arg value: The value to check for strings that need to be stripped
+ :arg no_log_strings: set of strings which must be stripped out of any values
+ :arg deferred_removals: List which holds information about nested
+ containers that have to be iterated for removals. It is passed into
+ this function so that more entries can be added to it if value is
+ a container type. The format of each entry is a 2-tuple where the first
+ element is the ``value`` parameter and the second value is a new
+ container to copy the elements of ``value`` into once iterated.
+
+ :returns: if ``value`` is a scalar, returns ``value`` with two exceptions:
+
+ 1. :class:`~datetime.datetime` objects which are changed into a string representation.
+ 2. objects which are in ``no_log_strings`` are replaced with a placeholder
+ so that no sensitive data is leaked.
+
+ If ``value`` is a container type, returns a new empty container.
+
+ ``deferred_removals`` is added to as a side-effect of this function.
+
+ .. warning:: It is up to the caller to make sure the order in which value
+ is passed in is correct. For instance, higher level containers need
+ to be passed in before lower level containers. For example, given
+ ``{'level1': {'level2': {'level3': [True]}}}`` first pass in the
+ dictionary for ``level1``, then the dict for ``level2``, and finally
+ the list for ``level3``.
+ """
+ if isinstance(value, (text_type, binary_type)):
+ # Need native str type
+ native_str_value = value
+ if isinstance(value, text_type):
+ value_is_text = True
+ if PY2:
+ native_str_value = to_bytes(value, errors='surrogate_or_strict')
+ elif isinstance(value, binary_type):
+ value_is_text = False
+ if PY3:
+ native_str_value = to_text(value, errors='surrogate_or_strict')
+
+ if native_str_value in no_log_strings:
+ return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
+ for omit_me in no_log_strings:
+ native_str_value = native_str_value.replace(omit_me, '*' * 8)
+
+ if value_is_text and isinstance(native_str_value, binary_type):
+ value = to_text(native_str_value, encoding='utf-8', errors='surrogate_then_replace')
+ elif not value_is_text and isinstance(native_str_value, text_type):
+ value = to_bytes(native_str_value, encoding='utf-8', errors='surrogate_then_replace')
+ else:
+ value = native_str_value
+
+ elif isinstance(value, Sequence):
+ if isinstance(value, MutableSequence):
+ new_value = type(value)()
+ else:
+ new_value = [] # Need a mutable value
+ deferred_removals.append((value, new_value))
+ value = new_value
+
+ elif isinstance(value, Set):
+ if isinstance(value, MutableSet):
+ new_value = type(value)()
+ else:
+ new_value = set() # Need a mutable value
+ deferred_removals.append((value, new_value))
+ value = new_value
+
+ elif isinstance(value, Mapping):
+ if isinstance(value, MutableMapping):
+ new_value = type(value)()
+ else:
+ new_value = {} # Need a mutable value
+ deferred_removals.append((value, new_value))
+ value = new_value
+
+ elif isinstance(value, tuple(chain(integer_types, (float, bool, NoneType)))):
+ stringy_value = to_native(value, encoding='utf-8', errors='surrogate_or_strict')
+ if stringy_value in no_log_strings:
+ return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
+ for omit_me in no_log_strings:
+ if omit_me in stringy_value:
+ return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
+
+ elif isinstance(value, (datetime.datetime, datetime.date)):
+ value = value.isoformat()
+ else:
+ raise TypeError('Value of unknown type: %s, %s' % (type(value), value))
+
+ return value
+
+
+def _set_defaults(argument_spec, parameters, set_default=True):
+ """Set default values for parameters when no value is supplied.
+
+ Modifies parameters directly.
+
+ :arg argument_spec: Argument spec
+ :type argument_spec: dict
+
+ :arg parameters: Parameters to evaluate
+ :type parameters: dict
+
+ :kwarg set_default: Whether or not to set the default values
+ :type set_default: bool
+
+ :returns: Set of strings that should not be logged.
+ :rtype: set
+ """
+
+ no_log_values = set()
+ for param, value in argument_spec.items():
+
+ # TODO: Change the default value from None to Sentinel to differentiate between
+ # user supplied None and a default value set by this function.
+ default = value.get('default', None)
+
+ # This prevents setting defaults on required items on the 1st run,
+ # otherwise will set things without a default to None on the 2nd.
+ if param not in parameters and (default is not None or set_default):
+ # Make sure any default value for no_log fields are masked.
+ if value.get('no_log', False) and default:
+ no_log_values.add(default)
+
+ parameters[param] = default
+
+ return no_log_values
+
+
+def _sanitize_keys_conditions(value, no_log_strings, ignore_keys, deferred_removals):
+ """ Helper method to :func:`sanitize_keys` to build ``deferred_removals`` and avoid deep recursion. """
+ if isinstance(value, (text_type, binary_type)):
+ return value
+
+ if isinstance(value, Sequence):
+ if isinstance(value, MutableSequence):
+ new_value = type(value)()
+ else:
+ new_value = [] # Need a mutable value
+ deferred_removals.append((value, new_value))
+ return new_value
+
+ if isinstance(value, Set):
+ if isinstance(value, MutableSet):
+ new_value = type(value)()
+ else:
+ new_value = set() # Need a mutable value
+ deferred_removals.append((value, new_value))
+ return new_value
+
+ if isinstance(value, Mapping):
+ if isinstance(value, MutableMapping):
+ new_value = type(value)()
+ else:
+ new_value = {} # Need a mutable value
+ deferred_removals.append((value, new_value))
+ return new_value
+
+ if isinstance(value, tuple(chain(integer_types, (float, bool, NoneType)))):
+ return value
+
+ if isinstance(value, (datetime.datetime, datetime.date)):
+ return value
+
+ raise TypeError('Value of unknown type: %s, %s' % (type(value), value))
+
+
+def _validate_elements(wanted_type, parameter, values, options_context=None, errors=None):
+
+ if errors is None:
+ errors = AnsibleValidationErrorMultiple()
+
+ type_checker, wanted_element_type = _get_type_validator(wanted_type)
+ validated_parameters = []
+ # Get param name for strings so we can later display this value in a useful error message if needed
+ # Only pass 'kwargs' to our checkers and ignore custom callable checkers
+ kwargs = {}
+ if wanted_element_type == 'str' and isinstance(wanted_type, string_types):
+ if isinstance(parameter, string_types):
+ kwargs['param'] = parameter
+ elif isinstance(parameter, dict):
+ kwargs['param'] = list(parameter.keys())[0]
+
+ for value in values:
+ try:
+ validated_parameters.append(type_checker(value, **kwargs))
+ except (TypeError, ValueError) as e:
+ msg = "Elements value for option '%s'" % parameter
+ if options_context:
+ msg += " found in '%s'" % " -> ".join(options_context)
+ msg += " is of type %s and we were unable to convert to %s: %s" % (type(value), wanted_element_type, to_native(e))
+ errors.append(ElementError(msg))
+ return validated_parameters
+
+
+def _validate_argument_types(argument_spec, parameters, prefix='', options_context=None, errors=None):
+ """Validate that parameter types match the type in the argument spec.
+
+ Determine the appropriate type checker function and run each
+ parameter value through that function. All error messages from type checker
+ functions are returned. If any parameter fails to validate, it will not
+ be in the returned parameters.
+
+ :arg argument_spec: Argument spec
+ :type argument_spec: dict
+
+ :arg parameters: Parameters
+ :type parameters: dict
+
+ :kwarg prefix: Name of the parent key that contains the spec. Used in the error message
+ :type prefix: str
+
+ :kwarg options_context: List of parent keys for tracking the context of where a parameter is defined
+ :type options_context: list
+
+ :returns: ``None``. ``parameters`` is modified in place with the validated
+ and coerced values, and any errors encountered are appended to ``errors``.
+ :rtype: None
+
+ """
+
+ if errors is None:
+ errors = AnsibleValidationErrorMultiple()
+
+ for param, spec in argument_spec.items():
+ if param not in parameters:
continue
- if not is_iterable(aliases) or isinstance(aliases, (binary_type, text_type)):
- raise TypeError('internal error: aliases must be a list or tuple')
- for alias in aliases:
- legal_inputs.append(alias)
- aliases_results[alias] = k
- if alias in params:
- if k in params and alias_warnings is not None:
- alias_warnings.append((k, alias))
- params[k] = params[alias]
- return aliases_results, legal_inputs
+ value = parameters[param]
+ if value is None:
+ continue
+
+ wanted_type = spec.get('type')
+ type_checker, wanted_name = _get_type_validator(wanted_type)
+ # Get param name for strings so we can later display this value in a useful error message if needed
+ # Only pass 'kwargs' to our checkers and ignore custom callable checkers
+ kwargs = {}
+ if wanted_name == 'str' and isinstance(wanted_type, string_types):
+ kwargs['param'] = list(parameters.keys())[0]
+
+ # Get the name of the parent key if this is a nested option
+ if prefix:
+ kwargs['prefix'] = prefix
+
+ try:
+ parameters[param] = type_checker(value, **kwargs)
+ elements_wanted_type = spec.get('elements', None)
+ if elements_wanted_type:
+ elements = parameters[param]
+ if wanted_type != 'list' or not isinstance(elements, list):
+ msg = "Invalid type %s for option '%s'" % (wanted_name, elements)
+ if options_context:
+ msg += " found in '%s'." % " -> ".join(options_context)
+ msg += ", elements value check is supported only with 'list' type"
+ errors.append(ArgumentTypeError(msg))
+ parameters[param] = _validate_elements(elements_wanted_type, param, elements, options_context, errors)
+
+ except (TypeError, ValueError) as e:
+ msg = "argument '%s' is of type %s" % (param, type(value))
+ if options_context:
+ msg += " found in '%s'." % " -> ".join(options_context)
+ msg += " and we were unable to convert to %s: %s" % (wanted_name, to_native(e))
+ errors.append(ArgumentTypeError(msg))
+
+
+def _validate_argument_values(argument_spec, parameters, options_context=None, errors=None):
+ """Ensure all arguments have the requested values, and there are no stray arguments"""
+
+ if errors is None:
+ errors = AnsibleValidationErrorMultiple()
+
+ for param, spec in argument_spec.items():
+ choices = spec.get('choices')
+ if choices is None:
+ continue
+
+ if isinstance(choices, (frozenset, KeysView, Sequence)) and not isinstance(choices, (binary_type, text_type)):
+ if param in parameters:
+ # Allow one or more when type='list' param with choices
+ if isinstance(parameters[param], list):
+ diff_list = ", ".join([item for item in parameters[param] if item not in choices])
+ if diff_list:
+ choices_str = ", ".join([to_native(c) for c in choices])
+ msg = "value of %s must be one or more of: %s. Got no match for: %s" % (param, choices_str, diff_list)
+ if options_context:
+ msg = "{0} found in {1}".format(msg, " -> ".join(options_context))
+ errors.append(ArgumentValueError(msg))
+ elif parameters[param] not in choices:
+ # PyYaml converts certain strings to bools. If we can unambiguously convert back, do so before checking
+ # the value. If we can't figure this out, module author is responsible.
+ lowered_choices = None
+ if parameters[param] == 'False':
+ lowered_choices = lenient_lowercase(choices)
+ overlap = BOOLEANS_FALSE.intersection(choices)
+ if len(overlap) == 1:
+ # Extract from a set
+ (parameters[param],) = overlap
+
+ if parameters[param] == 'True':
+ if lowered_choices is None:
+ lowered_choices = lenient_lowercase(choices)
+ overlap = BOOLEANS_TRUE.intersection(choices)
+ if len(overlap) == 1:
+ (parameters[param],) = overlap
+
+ if parameters[param] not in choices:
+ choices_str = ", ".join([to_native(c) for c in choices])
+ msg = "value of %s must be one of: %s, got: %s" % (param, choices_str, parameters[param])
+ if options_context:
+ msg = "{0} found in {1}".format(msg, " -> ".join(options_context))
+ errors.append(ArgumentValueError(msg))
+ else:
+ msg = "internal error: choices for argument %s are not iterable: %s" % (param, choices)
+ if options_context:
+ msg = "{0} found in {1}".format(msg, " -> ".join(options_context))
+ errors.append(ArgumentTypeError(msg))
+
+
+def _validate_sub_spec(argument_spec, parameters, prefix='', options_context=None, errors=None, no_log_values=None, unsupported_parameters=None):
+ """Validate sub argument spec.
+
+ This function is recursive.
+ """
+
+ if options_context is None:
+ options_context = []
+
+ if errors is None:
+ errors = AnsibleValidationErrorMultiple()
+
+ if no_log_values is None:
+ no_log_values = set()
+
+ if unsupported_parameters is None:
+ unsupported_parameters = set()
+
+ for param, value in argument_spec.items():
+ wanted = value.get('type')
+ if wanted == 'dict' or (wanted == 'list' and value.get('elements', '') == 'dict'):
+ sub_spec = value.get('options')
+ if value.get('apply_defaults', False):
+ if sub_spec is not None:
+ if parameters.get(param) is None:
+ parameters[param] = {}
+ else:
+ continue
+ elif sub_spec is None or param not in parameters or parameters[param] is None:
+ continue
+
+ # Keep track of context for warning messages
+ options_context.append(param)
+
+ # Make sure we can iterate over the elements
+ if isinstance(parameters[param], dict):
+ elements = [parameters[param]]
+ else:
+ elements = parameters[param]
+
+ for idx, sub_parameters in enumerate(elements):
+ if not isinstance(sub_parameters, dict):
+ errors.append(SubParameterTypeError("value of '%s' must be of type dict or list of dicts" % param))
+
+ # Set prefix for warning messages
+ new_prefix = prefix + param
+ if wanted == 'list':
+ new_prefix += '[%d]' % idx
+ new_prefix += '.'
+
+ no_log_values.update(set_fallbacks(sub_spec, sub_parameters))
+
+ alias_warnings = []
+ alias_deprecations = []
+ try:
+ options_aliases = _handle_aliases(sub_spec, sub_parameters, alias_warnings, alias_deprecations)
+ except (TypeError, ValueError) as e:
+ options_aliases = {}
+ errors.append(AliasError(to_native(e)))
+
+ for option, alias in alias_warnings:
+ warn('Both option %s and its alias %s are set.' % (option, alias))
+
+ try:
+ no_log_values.update(_list_no_log_values(sub_spec, sub_parameters))
+ except TypeError as te:
+ errors.append(NoLogError(to_native(te)))
+
+ legal_inputs = _get_legal_inputs(sub_spec, sub_parameters, options_aliases)
+ unsupported_parameters.update(_get_unsupported_parameters(sub_spec, sub_parameters, legal_inputs, options_context))
+
+ try:
+ check_mutually_exclusive(value.get('mutually_exclusive'), sub_parameters, options_context)
+ except TypeError as e:
+ errors.append(MutuallyExclusiveError(to_native(e)))
+
+ no_log_values.update(_set_defaults(sub_spec, sub_parameters, False))
+
+ try:
+ check_required_arguments(sub_spec, sub_parameters, options_context)
+ except TypeError as e:
+ errors.append(RequiredError(to_native(e)))
+
+ _validate_argument_types(sub_spec, sub_parameters, new_prefix, options_context, errors=errors)
+ _validate_argument_values(sub_spec, sub_parameters, options_context, errors=errors)
+
+ for check in _ADDITIONAL_CHECKS:
+ try:
+ check['func'](value.get(check['attr']), sub_parameters, options_context)
+ except TypeError as e:
+ errors.append(check['err'](to_native(e)))
+
+ no_log_values.update(_set_defaults(sub_spec, sub_parameters))
+
+ # Handle nested specs
+ _validate_sub_spec(sub_spec, sub_parameters, new_prefix, options_context, errors, no_log_values, unsupported_parameters)
+
+ options_context.pop()
+
+
+def env_fallback(*args, **kwargs):
+ """Load value from environment variable"""
+
+ for arg in args:
+ if arg in os.environ:
+ return os.environ[arg]
+ raise AnsibleFallbackNotFound
+
+
+def set_fallbacks(argument_spec, parameters):
+ no_log_values = set()
+ for param, value in argument_spec.items():
+ fallback = value.get('fallback', (None,))
+ fallback_strategy = fallback[0]
+ fallback_args = []
+ fallback_kwargs = {}
+ if param not in parameters and fallback_strategy is not None:
+ for item in fallback[1:]:
+ if isinstance(item, dict):
+ fallback_kwargs = item
+ else:
+ fallback_args = item
+ try:
+ fallback_value = fallback_strategy(*fallback_args, **fallback_kwargs)
+ except AnsibleFallbackNotFound:
+ continue
+ else:
+ if value.get('no_log', False) and fallback_value:
+ no_log_values.add(fallback_value)
+ parameters[param] = fallback_value
+
+ return no_log_values
+
+
+def sanitize_keys(obj, no_log_strings, ignore_keys=frozenset()):
+ """Sanitize the keys in a container object by removing ``no_log`` values from key names.
+
+ This is a companion function to the :func:`remove_values` function. Similar to that function,
+ we make use of ``deferred_removals`` to avoid hitting maximum recursion depth in cases of
+ large data structures.
+
+ :arg obj: The container object to sanitize. Non-container objects are returned unmodified.
+ :arg no_log_strings: A set of string values we do not want logged.
+ :kwarg ignore_keys: A set of string values of keys to not sanitize.
+
+ :returns: An object with sanitized keys.
+ """
+
+ deferred_removals = deque()
+
+ no_log_strings = [to_native(s, errors='surrogate_or_strict') for s in no_log_strings]
+ new_value = _sanitize_keys_conditions(obj, no_log_strings, ignore_keys, deferred_removals)
+
+ while deferred_removals:
+ old_data, new_data = deferred_removals.popleft()
+
+ if isinstance(new_data, Mapping):
+ for old_key, old_elem in old_data.items():
+ if old_key in ignore_keys or old_key.startswith('_ansible'):
+ new_data[old_key] = _sanitize_keys_conditions(old_elem, no_log_strings, ignore_keys, deferred_removals)
+ else:
+ # Sanitize the old key. We take advantage of the sanitizing code in
+ # _remove_values_conditions() rather than recreating it here.
+ new_key = _remove_values_conditions(old_key, no_log_strings, None)
+ new_data[new_key] = _sanitize_keys_conditions(old_elem, no_log_strings, ignore_keys, deferred_removals)
+ else:
+ for elem in old_data:
+ new_elem = _sanitize_keys_conditions(elem, no_log_strings, ignore_keys, deferred_removals)
+ if isinstance(new_data, MutableSequence):
+ new_data.append(new_elem)
+ elif isinstance(new_data, MutableSet):
+ new_data.add(new_elem)
+ else:
+ raise TypeError('Unknown container type encountered when removing private values from keys')
+
+ return new_value
+
+
+def remove_values(value, no_log_strings):
+ """Remove strings in ``no_log_strings`` from value.
+
+ If value is a container type, then remove a lot more.
+
+ Use of ``deferred_removals`` exists, rather than a pure recursive solution,
+ because of the potential to hit the maximum recursion depth when dealing with
+ large amounts of data (see `issue #24560 <https://github.com/ansible/ansible/issues/24560>`_).
+ """
+
+ deferred_removals = deque()
+
+ no_log_strings = [to_native(s, errors='surrogate_or_strict') for s in no_log_strings]
+ new_value = _remove_values_conditions(value, no_log_strings, deferred_removals)
+
+ while deferred_removals:
+ old_data, new_data = deferred_removals.popleft()
+ if isinstance(new_data, Mapping):
+ for old_key, old_elem in old_data.items():
+ new_elem = _remove_values_conditions(old_elem, no_log_strings, deferred_removals)
+ new_data[old_key] = new_elem
+ else:
+ for elem in old_data:
+ new_elem = _remove_values_conditions(elem, no_log_strings, deferred_removals)
+ if isinstance(new_data, MutableSequence):
+ new_data.append(new_elem)
+ elif isinstance(new_data, MutableSet):
+ new_data.add(new_elem)
+ else:
+ raise TypeError('Unknown container type encountered when removing private values from output')
+
+ return new_value
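
The two public helpers that close the new parameters.py, ``remove_values`` and ``sanitize_keys``, can be exercised on their own; a short illustration with made-up sample data (signatures taken from the hunks above):

    from ansible.module_utils.common.parameters import remove_values, sanitize_keys

    secrets = {'hunter2'}
    data = {
        'user': 'admin',
        'password': 'hunter2',
        'extra': ['ok', 'prefix-hunter2-suffix'],
    }

    print(remove_values(data, secrets))
    # the exact match becomes 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'; the embedded
    # occurrence inside the list item is masked with '********'

    print(sanitize_keys({'hunter2-key': 'value'}, secrets))
    # only the key is rewritten ('********-key'); values are left alone by this helper
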
diff --git a/lib/ansible/module_utils/common/process.py b/lib/ansible/module_utils/common/process.py
index 91e818a0..f128cd98 100644
--- a/lib/ansible/module_utils/common/process.py
+++ b/lib/ansible/module_utils/common/process.py
@@ -39,6 +39,6 @@ def get_bin_path(arg, opt_dirs=None, required=None):
bin_path = path
break
if bin_path is None:
- raise ValueError('Failed to find required executable %s in paths: %s' % (arg, os.pathsep.join(paths)))
+ raise ValueError('Failed to find required executable "%s" in paths: %s' % (arg, os.pathsep.join(paths)))
return bin_path
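
The quoting change above only affects the error text; a quick sketch of where that message surfaces (the ``opt_dirs`` value is arbitrary):

    from ansible.module_utils.common.process import get_bin_path

    try:
        git_path = get_bin_path('git', opt_dirs=['/opt/git/bin'])
    except ValueError as exc:
        # now reads: Failed to find required executable "git" in paths: /sbin:/usr/sbin:...
        print(exc)
    else:
        print('found git at %s' % git_path)
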
diff --git a/lib/ansible/module_utils/common/respawn.py b/lib/ansible/module_utils/common/respawn.py
new file mode 100644
index 00000000..3bc526af
--- /dev/null
+++ b/lib/ansible/module_utils/common/respawn.py
@@ -0,0 +1,98 @@
+# Copyright: (c) 2021, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import subprocess
+import sys
+
+from ansible.module_utils.common.text.converters import to_bytes, to_native
+
+
+def has_respawned():
+ return hasattr(sys.modules['__main__'], '_respawned')
+
+
+def respawn_module(interpreter_path):
+ """
+ Respawn the currently-running Ansible Python module under the specified Python interpreter.
+
+ Ansible modules that require libraries that are typically available only under well-known interpreters
+ (eg, ``yum``, ``apt``, ``dnf``) can use bespoke logic to determine the libraries they need are not
+ available, then call `respawn_module` to re-execute the current module under a different interpreter
+ and exit the current process when the new subprocess has completed. The respawned process inherits only
+ stdout/stderr from the current process.
+
+ Only a single respawn is allowed. ``respawn_module`` will fail on nested respawns. Modules are encouraged
+ to call `has_respawned()` to defensively guide behavior before calling ``respawn_module``, and to ensure
+ that the target interpreter exists, as ``respawn_module`` will not fail gracefully.
+
+ :arg interpreter_path: path to a Python interpreter to respawn the current module
+ """
+
+ if has_respawned():
+ raise Exception('module has already been respawned')
+
+ # FUTURE: we need a safe way to log that a respawn has occurred for forensic/debug purposes
+ payload = _create_payload()
+ stdin_read, stdin_write = os.pipe()
+ os.write(stdin_write, to_bytes(payload))
+ os.close(stdin_write)
+ rc = subprocess.call([interpreter_path, '--'], stdin=stdin_read)
+ sys.exit(rc) # pylint: disable=ansible-bad-function
+
+
+def probe_interpreters_for_module(interpreter_paths, module_name):
+ """
+ Probes a supplied list of Python interpreters, returning the first one capable of
+ importing the named module. This is useful when attempting to locate a "system
+ Python" where OS-packaged utility modules are located.
+
+ :arg interpreter_paths: iterable of paths to Python interpreters. The paths will be probed
+ in order, and the first path that exists and can successfully import the named module will
+ be returned (or ``None`` if probing fails for all supplied paths).
+ :arg module_name: fully-qualified Python module name to probe for (eg, ``selinux``)
+ """
+ for interpreter_path in interpreter_paths:
+ if not os.path.exists(interpreter_path):
+ continue
+ try:
+ rc = subprocess.call([interpreter_path, '-c', 'import {0}'.format(module_name)])
+ if rc == 0:
+ return interpreter_path
+ except Exception:
+ continue
+
+ return None
+
+
+def _create_payload():
+ from ansible.module_utils import basic
+ smuggled_args = getattr(basic, '_ANSIBLE_ARGS')
+ if not smuggled_args:
+ raise Exception('unable to access ansible.module_utils.basic._ANSIBLE_ARGS (not launched by AnsiballZ?)')
+ module_fqn = sys.modules['__main__']._module_fqn
+ modlib_path = sys.modules['__main__']._modlib_path
+ respawn_code_template = '''
+import runpy
+import sys
+
+module_fqn = '{module_fqn}'
+modlib_path = '{modlib_path}'
+smuggled_args = b"""{smuggled_args}""".strip()
+
+
+if __name__ == '__main__':
+ sys.path.insert(0, modlib_path)
+
+ from ansible.module_utils import basic
+ basic._ANSIBLE_ARGS = smuggled_args
+
+ runpy.run_module(module_fqn, init_globals=dict(_respawned=True), run_name='__main__', alter_sys=True)
+ '''
+
+ respawn_code = respawn_code_template.format(module_fqn=module_fqn, modlib_path=modlib_path, smuggled_args=to_native(smuggled_args))
+
+ return respawn_code
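
The intended calling pattern for the new respawn helpers, as a hedged sketch (the ``rpm`` binding and the interpreter paths are placeholders, and ``respawn_module`` only works when the module was launched by AnsiballZ, per ``_create_payload`` above):

    from ansible.module_utils.common.respawn import (
        has_respawned,
        probe_interpreters_for_module,
        respawn_module,
    )

    try:
        import rpm  # hypothetical OS-packaged binding this module needs
        HAS_RPM = True
    except ImportError:
        HAS_RPM = False


    def ensure_rpm_binding():
        if HAS_RPM or has_respawned():
            return
        # look for a "system Python" that ships the binding
        interpreter = probe_interpreters_for_module(
            ['/usr/libexec/platform-python', '/usr/bin/python3'], 'rpm')
        if interpreter:
            respawn_module(interpreter)  # re-executes the module; exits with the child's rc
        # otherwise fall through and report the missing library as usual
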
diff --git a/lib/ansible/module_utils/common/validation.py b/lib/ansible/module_utils/common/validation.py
index fc13f4d0..7bb1add3 100644
--- a/lib/ansible/module_utils/common/validation.py
+++ b/lib/ansible/module_utils/common/validation.py
@@ -9,7 +9,7 @@ import os
import re
from ast import literal_eval
-from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils._text import to_native
from ansible.module_utils.common._json_compat import json
from ansible.module_utils.common.collections import is_iterable
from ansible.module_utils.common.text.converters import jsonify
@@ -23,11 +23,11 @@ from ansible.module_utils.six import (
)
-def count_terms(terms, module_parameters):
+def count_terms(terms, parameters):
"""Count the number of occurrences of a key in a given dictionary
:arg terms: String or iterable of values to check
- :arg module_parameters: Dictionary of module parameters
+ :arg parameters: Dictionary of parameters
:returns: An integer that is the number of occurrences of the terms values
in the provided dictionary.
@@ -36,19 +36,47 @@ def count_terms(terms, module_parameters):
if not is_iterable(terms):
terms = [terms]
- return len(set(terms).intersection(module_parameters))
+ return len(set(terms).intersection(parameters))
-def check_mutually_exclusive(terms, module_parameters):
+def safe_eval(value, locals=None, include_exceptions=False):
+ # do not allow method calls to modules
+ if not isinstance(value, string_types):
+ # already templated to a data structure, perhaps?
+ if include_exceptions:
+ return (value, None)
+ return value
+ if re.search(r'\w\.\w+\(', value):
+ if include_exceptions:
+ return (value, None)
+ return value
+ # do not allow imports
+ if re.search(r'import \w+', value):
+ if include_exceptions:
+ return (value, None)
+ return value
+ try:
+ result = literal_eval(value)
+ if include_exceptions:
+ return (result, None)
+ else:
+ return result
+ except Exception as e:
+ if include_exceptions:
+ return (value, e)
+ return value
+
+
+def check_mutually_exclusive(terms, parameters, options_context=None):
"""Check mutually exclusive terms against argument parameters
Accepts a single list or list of lists that are groups of terms that should be
mutually exclusive with one another
- :arg terms: List of mutually exclusive module parameters
- :arg module_parameters: Dictionary of module parameters
+ :arg terms: List of mutually exclusive parameters
+ :arg parameters: Dictionary of parameters
- :returns: Empty list or raises TypeError if the check fails.
+ :returns: Empty list or raises :class:`TypeError` if the check fails.
"""
results = []
@@ -56,19 +84,21 @@ def check_mutually_exclusive(terms, module_parameters):
return results
for check in terms:
- count = count_terms(check, module_parameters)
+ count = count_terms(check, parameters)
if count > 1:
results.append(check)
if results:
full_list = ['|'.join(check) for check in results]
msg = "parameters are mutually exclusive: %s" % ', '.join(full_list)
+ if options_context:
+ msg = "{0} found in {1}".format(msg, " -> ".join(options_context))
raise TypeError(to_native(msg))
return results
-def check_required_one_of(terms, module_parameters):
+def check_required_one_of(terms, parameters, options_context=None):
"""Check each list of terms to ensure at least one exists in the given module
parameters
@@ -76,9 +106,11 @@ def check_required_one_of(terms, module_parameters):
:arg terms: List of lists of terms to check. For each list of terms, at
least one is required.
- :arg module_parameters: Dictionary of module parameters
+ :arg parameters: Dictionary of parameters
+ :kwarg options_context: List of strings of parent key names if ``terms`` are
+ in a sub spec.
- :returns: Empty list or raises TypeError if the check fails.
+ :returns: Empty list or raises :class:`TypeError` if the check fails.
"""
results = []
@@ -86,30 +118,32 @@ def check_required_one_of(terms, module_parameters):
return results
for term in terms:
- count = count_terms(term, module_parameters)
+ count = count_terms(term, parameters)
if count == 0:
results.append(term)
if results:
for term in results:
msg = "one of the following is required: %s" % ', '.join(term)
+ if options_context:
+ msg = "{0} found in {1}".format(msg, " -> ".join(options_context))
raise TypeError(to_native(msg))
return results
-def check_required_together(terms, module_parameters):
+def check_required_together(terms, parameters, options_context=None):
"""Check each list of terms to ensure every parameter in each list exists
- in the given module parameters
+ in the given parameters.
- Accepts a list of lists or tuples
+ Accepts a list of lists or tuples.
:arg terms: List of lists of terms to check. Each list should include
parameters that are all required when at least one is specified
- in the module_parameters.
- :arg module_parameters: Dictionary of module parameters
+ in the parameters.
+ :arg parameters: Dictionary of parameters
- :returns: Empty list or raises TypeError if the check fails.
+ :returns: Empty list or raises :class:`TypeError` if the check fails.
"""
results = []
@@ -117,7 +151,7 @@ def check_required_together(terms, module_parameters):
return results
for term in terms:
- counts = [count_terms(field, module_parameters) for field in term]
+ counts = [count_terms(field, parameters) for field in term]
non_zero = [c for c in counts if c > 0]
if len(non_zero) > 0:
if 0 in counts:
@@ -125,21 +159,23 @@ def check_required_together(terms, module_parameters):
if results:
for term in results:
msg = "parameters are required together: %s" % ', '.join(term)
+ if options_context:
+ msg = "{0} found in {1}".format(msg, " -> ".join(options_context))
raise TypeError(to_native(msg))
return results
-def check_required_by(requirements, module_parameters):
+def check_required_by(requirements, parameters, options_context=None):
"""For each key in requirements, check the corresponding list to see if they
- exist in module_parameters
+ exist in parameters.
- Accepts a single string or list of values for each key
+ Accepts a single string or list of values for each key.
:arg requirements: Dictionary of requirements
- :arg module_parameters: Dictionary of module parameters
+ :arg parameters: Dictionary of parameters
- :returns: Empty dictionary or raises TypeError if the
+ :returns: Empty dictionary or raises :class:`TypeError` if the check fails.
"""
result = {}
@@ -147,36 +183,38 @@ def check_required_by(requirements, module_parameters):
return result
for (key, value) in requirements.items():
- if key not in module_parameters or module_parameters[key] is None:
+ if key not in parameters or parameters[key] is None:
continue
result[key] = []
# Support strings (single-item lists)
if isinstance(value, string_types):
value = [value]
for required in value:
- if required not in module_parameters or module_parameters[required] is None:
+ if required not in parameters or parameters[required] is None:
result[key].append(required)
if result:
for key, missing in result.items():
if len(missing) > 0:
msg = "missing parameter(s) required by '%s': %s" % (key, ', '.join(missing))
+ if options_context:
+ msg = "{0} found in {1}".format(msg, " -> ".join(options_context))
raise TypeError(to_native(msg))
return result
-def check_required_arguments(argument_spec, module_parameters):
- """Check all paramaters in argument_spec and return a list of parameters
- that are required but not present in module_parameters
+def check_required_arguments(argument_spec, parameters, options_context=None):
+ """Check all parameters in argument_spec and return a list of parameters
+ that are required but not present in parameters.
- Raises TypeError if the check fails
+ Raises :class:`TypeError` if the check fails
- :arg argument_spec: Argument spec dicitionary containing all parameters
+ :arg argument_spec: Argument spec dictionary containing all parameters
and their specification
- :arg module_paramaters: Dictionary of module parameters
+ :arg parameters: Dictionary of parameters
- :returns: Empty list or raises TypeError if the check fails.
+ :returns: Empty list or raises :class:`TypeError` if the check fails.
"""
missing = []
@@ -185,45 +223,53 @@ def check_required_arguments(argument_spec, module_parameters):
for (k, v) in argument_spec.items():
required = v.get('required', False)
- if required and k not in module_parameters:
+ if required and k not in parameters:
missing.append(k)
if missing:
msg = "missing required arguments: %s" % ", ".join(sorted(missing))
+ if options_context:
+ msg = "{0} found in {1}".format(msg, " -> ".join(options_context))
raise TypeError(to_native(msg))
return missing
-def check_required_if(requirements, module_parameters):
+def check_required_if(requirements, parameters, options_context=None):
"""Check parameters that are conditionally required
- Raises TypeError if the check fails
+ Raises :class:`TypeError` if the check fails
:arg requirements: List of lists specifying a parameter, value, parameters
required when the given parameter is the specified value, and optionally
a boolean indicating any or all parameters are required.
- Example:
- required_if=[
- ['state', 'present', ('path',), True],
- ['someint', 99, ('bool_param', 'string_param')],
- ]
+ :Example:
+
+ .. code-block:: python
+
+ required_if=[
+ ['state', 'present', ('path',), True],
+ ['someint', 99, ('bool_param', 'string_param')],
+ ]
- :arg module_paramaters: Dictionary of module parameters
+ :arg parameters: Dictionary of parameters
- :returns: Empty list or raises TypeError if the check fails.
+ :returns: Empty list or raises :class:`TypeError` if the check fails.
The results attribute of the exception contains a list of dictionaries.
- Each dictionary is the result of evaluting each item in requirements.
+ Each dictionary is the result of evaluating each item in requirements.
Each return dictionary contains the following keys:
:key missing: List of parameters that are required but missing
:key requires: 'any' or 'all'
- :key paramater: Parameter name that has the requirement
- :key value: Original value of the paramater
+ :key parameter: Parameter name that has the requirement
+ :key value: Original value of the parameter
:key requirements: Original required parameters
- Example:
+ :Example:
+
+ .. code-block:: python
+
[
{
'parameter': 'someint',
@@ -257,9 +303,9 @@ def check_required_if(requirements, module_parameters):
else:
missing['requires'] = 'all'
- if key in module_parameters and module_parameters[key] == val:
+ if key in parameters and parameters[key] == val:
for check in requirements:
- count = count_terms(check, module_parameters)
+ count = count_terms(check, parameters)
if count == 0:
missing['missing'].append(check)
if len(missing['missing']) and len(missing['missing']) >= max_missing_count:
@@ -272,29 +318,30 @@ def check_required_if(requirements, module_parameters):
for missing in results:
msg = "%s is %s but %s of the following are missing: %s" % (
missing['parameter'], missing['value'], missing['requires'], ', '.join(missing['missing']))
+ if options_context:
+ msg = "{0} found in {1}".format(msg, " -> ".join(options_context))
raise TypeError(to_native(msg))
return results
-def check_missing_parameters(module_parameters, required_parameters=None):
+def check_missing_parameters(parameters, required_parameters=None):
"""This is for checking for required params when we can not check via
argspec because we need more information than is simply given in the argspec.
- Raises TypeError if any required parameters are missing
+ Raises :class:`TypeError` if any required parameters are missing
- :arg module_paramaters: Dictionary of module parameters
- :arg required_parameters: List of parameters to look for in the given module
- parameters
+ :arg parameters: Dictionary of parameters
+ :arg required_parameters: List of parameters to look for in the given parameters.
- :returns: Empty list or raises TypeError if the check fails.
+ :returns: Empty list or raises :class:`TypeError` if the check fails.
"""
missing_params = []
if required_parameters is None:
return missing_params
for param in required_parameters:
- if not module_parameters.get(param):
+ if not parameters.get(param):
missing_params.append(param)
if missing_params:
@@ -304,35 +351,10 @@ def check_missing_parameters(module_parameters, required_parameters=None):
return missing_params
-def safe_eval(value, locals=None, include_exceptions=False):
- # do not allow method calls to modules
- if not isinstance(value, string_types):
- # already templated to a datavaluestructure, perhaps?
- if include_exceptions:
- return (value, None)
- return value
- if re.search(r'\w\.\w+\(', value):
- if include_exceptions:
- return (value, None)
- return value
- # do not allow imports
- if re.search(r'import \w+', value):
- if include_exceptions:
- return (value, None)
- return value
- try:
- result = literal_eval(value)
- if include_exceptions:
- return (result, None)
- else:
- return result
- except Exception as e:
- if include_exceptions:
- return (value, e)
- return value
-
-
-def check_type_str(value, allow_conversion=True):
+# FIXME: The param and prefix parameters here are coming from AnsibleModule._check_type_string()
+# which is using those for the warning messages based on string conversion warning settings.
+# Not sure how to deal with that here since we don't have config state to query.
+def check_type_str(value, allow_conversion=True, param=None, prefix=''):
"""Verify that the value is a string or convert to a string.
Since unexpected changes can sometimes happen when converting to a string,
@@ -359,13 +381,13 @@ def check_type_str(value, allow_conversion=True):
def check_type_list(value):
"""Verify that the value is a list or convert to a list
- A comma separated string will be split into a list. Rases a TypeError if
- unable to convert to a list.
+ A comma separated string will be split into a list. Raises a :class:`TypeError`
+ if unable to convert to a list.
:arg value: Value to validate or convert to a list
:returns: Original value if it is already a list, single item list if a
- float, int or string without commas, or a multi-item list if a
+ float, int, or string without commas, or a multi-item list if a
comma-delimited string.
"""
if isinstance(value, list):
@@ -382,9 +404,9 @@ def check_type_list(value):
def check_type_dict(value):
"""Verify that value is a dict or convert it to a dict and return it.
- Raises TypeError if unable to convert to a dict
+ Raises :class:`TypeError` if unable to convert to a dict
- :arg value: Dict or string to convert to a dict. Accepts 'k1=v2, k2=v2'.
+ :arg value: Dict or string to convert to a dict. Accepts ``k1=v2, k2=v2``.
:returns: value converted to a dictionary
"""
@@ -436,7 +458,7 @@ def check_type_dict(value):
def check_type_bool(value):
"""Verify that the value is a bool or convert it to a bool and return it.
- Raises TypeError if unable to convert to a bool
+ Raises :class:`TypeError` if unable to convert to a bool
:arg value: String, int, or float to convert to bool. Valid booleans include:
'1', 'on', 1, '0', 0, 'n', 'f', 'false', 'true', 'y', 't', 'yes', 'no', 'off'
@@ -456,11 +478,11 @@ def check_type_int(value):
"""Verify that the value is an integer and return it or convert the value
to an integer and return it
- Raises TypeError if unable to convert to an int
+ Raises :class:`TypeError` if unable to convert to an int
:arg value: String or int to convert of verify
- :return: Int of given value
+ :return: int of given value
"""
if isinstance(value, integer_types):
return value
@@ -477,11 +499,11 @@ def check_type_int(value):
def check_type_float(value):
"""Verify that value is a float or convert it to a float and return it
- Raises TypeError if unable to convert to a float
+ Raises :class:`TypeError` if unable to convert to a float
- :arg value: Float, int, str, or bytes to verify or convert and return.
+ :arg value: float, int, str, or bytes to verify or convert and return.
- :returns: Float of given value.
+ :returns: float of given value.
"""
if isinstance(value, float):
return value
@@ -504,15 +526,14 @@ def check_type_path(value,):
def check_type_raw(value):
- """Returns the raw value
- """
+ """Returns the raw value"""
return value
def check_type_bytes(value):
"""Convert a human-readable string value to bytes
- Raises TypeError if unable to covert the value
+ Raises :class:`TypeError` if unable to convert the value
"""
try:
return human_to_bytes(value)
@@ -523,9 +544,9 @@ def check_type_bytes(value):
def check_type_bits(value):
"""Convert a human-readable string bits value to bits in integer.
- Example: check_type_bits('1Mb') returns integer 1048576.
+ Example: ``check_type_bits('1Mb')`` returns integer 1048576.
- Raises TypeError if unable to covert the value.
+ Raises :class:`TypeError` if unable to convert the value.
"""
try:
return human_to_bytes(value, isbits=True)
@@ -537,7 +558,7 @@ def check_type_jsonarg(value):
"""Return a jsonified string. Sometimes the controller turns a json string
into a dict/list so transform it back into json here
- Raises TypeError if unable to covert the value
+ Raises :class:`TypeError` if unable to convert the value
"""
if isinstance(value, (text_type, binary_type)):
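
To show how the renamed ``parameters`` argument and the new ``options_context`` keyword read in practice, a minimal sketch using only functions touched in these hunks:

    from ansible.module_utils.common.validation import (
        check_mutually_exclusive,
        check_type_bool,
        check_type_list,
    )

    parameters = {'state': 'present', 'enabled': 'yes', 'names': 'a,b,c'}

    # passes here; if both terms were set, the raised TypeError would end with
    # "found in container -> options" because of options_context
    check_mutually_exclusive([['state', 'path']], parameters,
                             options_context=['container', 'options'])

    print(check_type_bool(parameters['enabled']))  # True
    print(check_type_list(parameters['names']))    # ['a', 'b', 'c']
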
diff --git a/lib/ansible/module_utils/compat/selinux.py b/lib/ansible/module_utils/compat/selinux.py
new file mode 100644
index 00000000..7191713c
--- /dev/null
+++ b/lib/ansible/module_utils/compat/selinux.py
@@ -0,0 +1,113 @@
+# Copyright: (c) 2021, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+
+from ansible.module_utils.common.text.converters import to_native, to_bytes
+from ctypes import CDLL, c_char_p, c_int, byref, POINTER, get_errno
+
+try:
+ _selinux_lib = CDLL('libselinux.so.1', use_errno=True)
+except OSError:
+ raise ImportError('unable to load libselinux.so')
+
+
+def _module_setup():
+ def _check_rc(rc):
+ if rc < 0:
+ errno = get_errno()
+ raise OSError(errno, os.strerror(errno))
+ return rc
+
+ binary_char_type = type(b'')
+
+ class _to_char_p:
+ @classmethod
+ def from_param(cls, strvalue):
+ if strvalue is not None and not isinstance(strvalue, binary_char_type):
+ strvalue = to_bytes(strvalue)
+
+ return strvalue
+
+ # FIXME: swap restype to errcheck
+
+ _funcmap = dict(
+ is_selinux_enabled={},
+ is_selinux_mls_enabled={},
+ lgetfilecon_raw=dict(argtypes=[_to_char_p, POINTER(c_char_p)], restype=_check_rc),
+ # NB: matchpathcon is deprecated and should be rewritten on selabel_lookup (but will be a PITA)
+ matchpathcon=dict(argtypes=[_to_char_p, c_int, POINTER(c_char_p)], restype=_check_rc),
+ security_policyvers={},
+ selinux_getenforcemode=dict(argtypes=[POINTER(c_int)]),
+ security_getenforce={},
+ lsetfilecon=dict(argtypes=[_to_char_p, _to_char_p], restype=_check_rc),
+ selinux_getpolicytype=dict(argtypes=[POINTER(c_char_p)], restype=_check_rc),
+ )
+
+ _thismod = sys.modules[__name__]
+
+ for fname, cfg in _funcmap.items():
+ fn = getattr(_selinux_lib, fname, None)
+
+ if not fn:
+ raise ImportError('missing selinux function: {0}'.format(fname))
+
+ # all ctypes pointers share the same base type
+ base_ptr_type = type(POINTER(c_int))
+ fn.argtypes = cfg.get('argtypes', None)
+ fn.restype = cfg.get('restype', c_int)
+
+        # just patch simple, directly callable functions straight onto the module
+ if not fn.argtypes or not any(argtype for argtype in fn.argtypes if type(argtype) == base_ptr_type):
+ setattr(_thismod, fname, fn)
+ continue
+
+ # NB: this validation code must run after all the wrappers have been declared
+ unimplemented_funcs = set(_funcmap).difference(dir(_thismod))
+ if unimplemented_funcs:
+ raise NotImplementedError('implementation is missing functions: {0}'.format(unimplemented_funcs))
+
+
+# begin wrapper function impls
+
+def selinux_getenforcemode():
+ enforcemode = c_int()
+ rc = _selinux_lib.selinux_getenforcemode(byref(enforcemode))
+ return [rc, enforcemode.value]
+
+
+def selinux_getpolicytype():
+ con = c_char_p()
+ try:
+ rc = _selinux_lib.selinux_getpolicytype(byref(con))
+ return [rc, to_native(con.value)]
+ finally:
+ _selinux_lib.freecon(con)
+
+
+def lgetfilecon_raw(path):
+ con = c_char_p()
+ try:
+ rc = _selinux_lib.lgetfilecon_raw(path, byref(con))
+ return [rc, to_native(con.value)]
+ finally:
+ _selinux_lib.freecon(con)
+
+
+def matchpathcon(path, mode):
+ con = c_char_p()
+ try:
+ rc = _selinux_lib.matchpathcon(path, mode, byref(con))
+ return [rc, to_native(con.value)]
+ finally:
+ _selinux_lib.freecon(con)
+
+
+_module_setup()
+del _module_setup
+
+# end wrapper function impls
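
The shim above resolves libselinux symbols at import time with ctypes: simple functions are patched straight onto the module, while pointer-returning ones get the hand-written wrappers. A standalone sketch of the same ctypes binding pattern, using libc's getpid purely as a stand-in (not part of the shim; assumes a typical Linux system):

    import ctypes.util
    from ctypes import CDLL, c_int

    libc = CDLL(ctypes.util.find_library('c'), use_errno=True)
    libc.getpid.restype = c_int   # declare the return type, as the shim does per function
    libc.getpid()                 # callable straight off the library handle
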
diff --git a/lib/ansible/module_utils/errors.py b/lib/ansible/module_utils/errors.py
new file mode 100644
index 00000000..3274b85b
--- /dev/null
+++ b/lib/ansible/module_utils/errors.py
@@ -0,0 +1,119 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2021 Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+class AnsibleFallbackNotFound(Exception):
+ """Fallback validator was not found"""
+
+
+class AnsibleValidationError(Exception):
+ """Single argument spec validation error"""
+
+ def __init__(self, message):
+ super(AnsibleValidationError, self).__init__(message)
+ self.error_message = message
+ """The error message passed in when the exception was raised."""
+
+ @property
+ def msg(self):
+ """The error message passed in when the exception was raised."""
+ return self.args[0]
+
+
+class AnsibleValidationErrorMultiple(AnsibleValidationError):
+ """Multiple argument spec validation errors"""
+
+ def __init__(self, errors=None):
+ self.errors = errors[:] if errors else []
+ """:class:`list` of :class:`AnsibleValidationError` objects"""
+
+ def __getitem__(self, key):
+ return self.errors[key]
+
+ def __setitem__(self, key, value):
+ self.errors[key] = value
+
+ def __delitem__(self, key):
+ del self.errors[key]
+
+ @property
+ def msg(self):
+ """The first message from the first error in ``errors``."""
+ return self.errors[0].args[0]
+
+ @property
+ def messages(self):
+ """:class:`list` of each error message in ``errors``."""
+ return [err.msg for err in self.errors]
+
+ def append(self, error):
+ """Append a new error to ``self.errors``.
+
+ Only :class:`AnsibleValidationError` should be added.
+ """
+
+ self.errors.append(error)
+
+ def extend(self, errors):
+ """Append each item in ``errors`` to ``self.errors``. Only :class:`AnsibleValidationError` should be added."""
+ self.errors.extend(errors)
+
+
+class AliasError(AnsibleValidationError):
+ """Error handling aliases"""
+
+
+class ArgumentTypeError(AnsibleValidationError):
+ """Error with parameter type"""
+
+
+class ArgumentValueError(AnsibleValidationError):
+ """Error with parameter value"""
+
+
+class ElementError(AnsibleValidationError):
+ """Error when validating elements"""
+
+
+class MutuallyExclusiveError(AnsibleValidationError):
+ """Mutually exclusive parameters were supplied"""
+
+
+class NoLogError(AnsibleValidationError):
+ """Error converting no_log values"""
+
+
+class RequiredByError(AnsibleValidationError):
+ """Error with parameters that are required by other parameters"""
+
+
+class RequiredDefaultError(AnsibleValidationError):
+ """A required parameter was assigned a default value"""
+
+
+class RequiredError(AnsibleValidationError):
+ """Missing a required parameter"""
+
+
+class RequiredIfError(AnsibleValidationError):
+ """Error with conditionally required parameters"""
+
+
+class RequiredOneOfError(AnsibleValidationError):
+ """Error with parameters where at least one is required"""
+
+
+class RequiredTogetherError(AnsibleValidationError):
+ """Error with parameters that are required together"""
+
+
+class SubParameterTypeError(AnsibleValidationError):
+ """Incorrect type for subparameter"""
+
+
+class UnsupportedError(AnsibleValidationError):
+ """Unsupported parameters were supplied"""
diff --git a/lib/ansible/module_utils/facts/ansible_collector.py b/lib/ansible/module_utils/facts/ansible_collector.py
index 8ca0089e..7f6f576f 100644
--- a/lib/ansible/module_utils/facts/ansible_collector.py
+++ b/lib/ansible/module_utils/facts/ansible_collector.py
@@ -34,6 +34,7 @@ import sys
from ansible.module_utils.facts import timeout
from ansible.module_utils.facts import collector
+from ansible.module_utils.common.collections import is_string
class AnsibleFactCollector(collector.BaseFactCollector):
@@ -53,11 +54,14 @@ class AnsibleFactCollector(collector.BaseFactCollector):
self.filter_spec = filter_spec
def _filter(self, facts_dict, filter_spec):
- # assume a filter_spec='' is equilv to filter_spec='*'
+ # assume filter_spec='' or filter_spec=[] is equivalent to filter_spec='*'
if not filter_spec or filter_spec == '*':
return facts_dict
- return [(x, y) for x, y in facts_dict.items() if fnmatch.fnmatch(x, filter_spec)]
+ if is_string(filter_spec):
+ filter_spec = [filter_spec]
+
+ return [(x, y) for x, y in facts_dict.items() for f in filter_spec if not f or fnmatch.fnmatch(x, f)]
def collect(self, module=None, collected_facts=None):
collected_facts = collected_facts or {}
@@ -111,7 +115,7 @@ def get_ansible_collector(all_collector_classes,
gather_timeout=None,
minimal_gather_subset=None):
- filter_spec = filter_spec or '*'
+ filter_spec = filter_spec or []
gather_subset = gather_subset or ['all']
gather_timeout = gather_timeout or timeout.DEFAULT_GATHER_TIMEOUT
minimal_gather_subset = minimal_gather_subset or frozenset()
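
The reworked _filter accepts either a single pattern or a list of patterns, each matched against fact names with fnmatch. A standalone sketch of the same semantics with hypothetical fact names:

    import fnmatch

    facts = {'ansible_distribution': 'Debian', 'ansible_kernel': '5.10.0', 'date_time': {}}
    filter_spec = ['ansible_dist*', 'date_time']
    [(k, v) for k, v in facts.items() for f in filter_spec if not f or fnmatch.fnmatch(k, f)]
    # -> [('ansible_distribution', 'Debian'), ('date_time', {})]
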
diff --git a/lib/ansible/module_utils/facts/hardware/linux.py b/lib/ansible/module_utils/facts/hardware/linux.py
index c468e685..0829d495 100644
--- a/lib/ansible/module_utils/facts/hardware/linux.py
+++ b/lib/ansible/module_utils/facts/hardware/linux.py
@@ -543,7 +543,7 @@ class LinuxHardware(Hardware):
device, mount, fstype, options = fields[0], fields[1], fields[2], fields[3]
- if not device.startswith('/') and ':/' not in device or fstype == 'none':
+ if not device.startswith(('/', '\\')) and ':/' not in device or fstype == 'none':
continue
mount_info = {'mount': mount,
@@ -700,6 +700,9 @@ class LinuxHardware(Hardware):
sg_inq = self.module.get_bin_path('sg_inq')
+ # we can get NVMe device's serial number from /sys/block/<name>/device/serial
+ serial_path = "/sys/block/%s/device/serial" % (block)
+
if sg_inq:
device = "/dev/%s" % (block)
rc, drivedata, err = self.module.run_command([sg_inq, device])
@@ -707,6 +710,10 @@ class LinuxHardware(Hardware):
serial = re.search(r"Unit serial number:\s+(\w+)", drivedata)
if serial:
d['serial'] = serial.group(1)
+ else:
+ serial = get_file_content(serial_path)
+ if serial:
+ d['serial'] = serial
for key, test in [('removable', '/removable'),
('support_discard', '/queue/discard_granularity'),
diff --git a/lib/ansible/module_utils/facts/hardware/openbsd.py b/lib/ansible/module_utils/facts/hardware/openbsd.py
index 6b666047..3bcf8ce4 100644
--- a/lib/ansible/module_utils/facts/hardware/openbsd.py
+++ b/lib/ansible/module_utils/facts/hardware/openbsd.py
@@ -17,6 +17,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
+import time
from ansible.module_utils._text import to_text
@@ -38,6 +39,7 @@ class OpenBSDHardware(Hardware):
- processor_cores
- processor_count
- processor_speed
+ - uptime_seconds
In addition, it also defines number of DMI facts and device facts.
"""
@@ -47,24 +49,18 @@ class OpenBSDHardware(Hardware):
hardware_facts = {}
self.sysctl = get_sysctl(self.module, ['hw'])
- # TODO: change name
- cpu_facts = self.get_processor_facts()
- memory_facts = self.get_memory_facts()
- device_facts = self.get_device_facts()
- dmi_facts = self.get_dmi_facts()
+ hardware_facts.update(self.get_processor_facts())
+ hardware_facts.update(self.get_memory_facts())
+ hardware_facts.update(self.get_device_facts())
+ hardware_facts.update(self.get_dmi_facts())
+ hardware_facts.update(self.get_uptime_facts())
- mount_facts = {}
+        # storage devices are notoriously prone to hang/block, so they are under a timeout
try:
- mount_facts = self.get_mount_facts()
+ hardware_facts.update(self.get_mount_facts())
except timeout.TimeoutError:
pass
- hardware_facts.update(cpu_facts)
- hardware_facts.update(memory_facts)
- hardware_facts.update(dmi_facts)
- hardware_facts.update(device_facts)
- hardware_facts.update(mount_facts)
-
return hardware_facts
@timeout.timeout()
@@ -115,10 +111,28 @@ class OpenBSDHardware(Hardware):
return memory_facts
+ def get_uptime_facts(self):
+        # On OpenBSD, we need to call sysctl with -n to get this value as an integer.
+ sysctl_cmd = self.module.get_bin_path('sysctl')
+ cmd = [sysctl_cmd, '-n', 'kern.boottime']
+
+ rc, out, err = self.module.run_command(cmd)
+
+ if rc != 0:
+ return {}
+
+ kern_boottime = out.strip()
+ if not kern_boottime.isdigit():
+ return {}
+
+ return {
+ 'uptime_seconds': int(time.time() - int(kern_boottime)),
+ }
+
def get_processor_facts(self):
cpu_facts = {}
processor = []
- for i in range(int(self.sysctl['hw.ncpu'])):
+ for i in range(int(self.sysctl['hw.ncpuonline'])):
processor.append(self.sysctl['hw.model'])
cpu_facts['processor'] = processor
@@ -129,8 +143,8 @@ class OpenBSDHardware(Hardware):
# dmesg, however even those have proven to be unreliable.
# So take a shortcut and report the logical number of processors in
# 'processor_count' and 'processor_cores' and leave it at that.
- cpu_facts['processor_count'] = self.sysctl['hw.ncpu']
- cpu_facts['processor_cores'] = self.sysctl['hw.ncpu']
+ cpu_facts['processor_count'] = self.sysctl['hw.ncpuonline']
+ cpu_facts['processor_cores'] = self.sysctl['hw.ncpuonline']
return cpu_facts
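
get_uptime_facts() assumes `sysctl -n kern.boottime` prints the boot time as integer epoch seconds; the calculation in isolation, with a hypothetical value:

    import time

    kern_boottime = '1617184000'   # hypothetical `sysctl -n kern.boottime` output
    if kern_boottime.isdigit():
        uptime_seconds = int(time.time() - int(kern_boottime))
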
diff --git a/lib/ansible/module_utils/facts/network/hpux.py b/lib/ansible/module_utils/facts/network/hpux.py
index 6e87ee92..add57be8 100644
--- a/lib/ansible/module_utils/facts/network/hpux.py
+++ b/lib/ansible/module_utils/facts/network/hpux.py
@@ -60,7 +60,7 @@ class HPUXNetwork(Network):
def get_interfaces_info(self):
interfaces = {}
- rc, out, err = self.module.run_command("/usr/bin/netstat -ni")
+ rc, out, err = self.module.run_command("/usr/bin/netstat -niw")
lines = out.splitlines()
for line in lines:
words = line.split()
diff --git a/lib/ansible/module_utils/facts/sysctl.py b/lib/ansible/module_utils/facts/sysctl.py
index 4c82dc22..2c55d776 100644
--- a/lib/ansible/module_utils/facts/sysctl.py
+++ b/lib/ansible/module_utils/facts/sysctl.py
@@ -18,21 +18,45 @@ __metaclass__ = type
import re
+from ansible.module_utils._text import to_text
+
def get_sysctl(module, prefixes):
sysctl_cmd = module.get_bin_path('sysctl')
cmd = [sysctl_cmd]
cmd.extend(prefixes)
- rc, out, err = module.run_command(cmd)
- if rc != 0:
- return dict()
-
sysctl = dict()
- for line in out.splitlines():
- if not line:
- continue
- (key, value) = re.split(r'\s?=\s?|: ', line, maxsplit=1)
- sysctl[key] = value.strip()
+
+ try:
+ rc, out, err = module.run_command(cmd)
+ except (IOError, OSError) as e:
+ module.warn('Unable to read sysctl: %s' % to_text(e))
+ rc = 1
+
+ if rc == 0:
+ key = ''
+ value = ''
+ for line in out.splitlines():
+ if not line.strip():
+ continue
+
+ if line.startswith(' '):
+ # handle multiline values, they will not have a starting key
+ # Add the newline back in so people can split on it to parse
+ # lines if they need to.
+ value += '\n' + line
+ continue
+
+ if key:
+ sysctl[key] = value.strip()
+
+ try:
+ (key, value) = re.split(r'\s?=\s?|: ', line, maxsplit=1)
+ except Exception as e:
+ module.warn('Unable to split sysctl line (%s): %s' % (to_text(line), to_text(e)))
+
+ if key:
+ sysctl[key] = value.strip()
return sysctl
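
The rewritten parser splits each non-indented line on either '=' or ': ' and glues indented continuation lines back onto the previous key. The key/value split on its own, against a hypothetical sysctl line:

    import re

    line = 'hw.model: Intel(R) Core(TM) i7-8550U CPU @ 1.80GHz'
    key, value = re.split(r'\s?=\s?|: ', line, maxsplit=1)
    # key == 'hw.model', value == 'Intel(R) Core(TM) i7-8550U CPU @ 1.80GHz'
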
diff --git a/lib/ansible/module_utils/facts/system/date_time.py b/lib/ansible/module_utils/facts/system/date_time.py
index aa59d5bc..e42ced97 100644
--- a/lib/ansible/module_utils/facts/system/date_time.py
+++ b/lib/ansible/module_utils/facts/system/date_time.py
@@ -56,6 +56,7 @@ class DateTimeFactCollector(BaseFactCollector):
date_time_facts['iso8601_basic'] = now.strftime("%Y%m%dT%H%M%S%f")
date_time_facts['iso8601_basic_short'] = now.strftime("%Y%m%dT%H%M%S")
date_time_facts['tz'] = time.strftime("%Z")
+ date_time_facts['tz_dst'] = time.tzname[1]
date_time_facts['tz_offset'] = time.strftime("%z")
facts_dict['date_time'] = date_time_facts
diff --git a/lib/ansible/module_utils/facts/system/distribution.py b/lib/ansible/module_utils/facts/system/distribution.py
index 09236a9a..604e9d96 100644
--- a/lib/ansible/module_utils/facts/system/distribution.py
+++ b/lib/ansible/module_utils/facts/system/distribution.py
@@ -55,9 +55,11 @@ class DistributionFiles:
{'path': '/etc/altlinux-release', 'name': 'Altlinux'},
{'path': '/etc/oracle-release', 'name': 'OracleLinux'},
{'path': '/etc/slackware-version', 'name': 'Slackware'},
+ {'path': '/etc/centos-release', 'name': 'CentOS'},
{'path': '/etc/redhat-release', 'name': 'RedHat'},
{'path': '/etc/vmware-release', 'name': 'VMwareESX', 'allowempty': True},
{'path': '/etc/openwrt_release', 'name': 'OpenWrt'},
+ {'path': '/etc/os-release', 'name': 'Amazon'},
{'path': '/etc/system-release', 'name': 'Amazon'},
{'path': '/etc/alpine-release', 'name': 'Alpine'},
{'path': '/etc/arch-release', 'name': 'Archlinux', 'allowempty': True},
@@ -136,7 +138,7 @@ class DistributionFiles:
parsed, dist_file_dict = distfunc(name, dist_file_content, path, collected_facts)
return parsed, dist_file_dict
except AttributeError as exc:
- print('exc: %s' % exc)
+ self.module.debug('exc: %s' % exc)
# this should never happen, but if it does fail quietly and not with a traceback
return False, dist_file_dict
@@ -221,9 +223,24 @@ class DistributionFiles:
if 'Amazon' not in data:
return False, amazon_facts
amazon_facts['distribution'] = 'Amazon'
- version = [n for n in data.split() if n.isdigit()]
- version = version[0] if version else 'NA'
- amazon_facts['distribution_version'] = version
+ if path == '/etc/os-release':
+ version = re.search(r"VERSION_ID=\"(.*)\"", data)
+ if version:
+ distribution_version = version.group(1)
+ amazon_facts['distribution_version'] = distribution_version
+ version_data = distribution_version.split(".")
+ if len(version_data) > 1:
+ major, minor = version_data
+ else:
+ major, minor = version_data[0], 'NA'
+
+ amazon_facts['distribution_major_version'] = major
+ amazon_facts['distribution_minor_version'] = minor
+ else:
+ version = [n for n in data.split() if n.isdigit()]
+ version = version[0] if version else 'NA'
+ amazon_facts['distribution_version'] = version
+
return True, amazon_facts
def parse_distribution_file_OpenWrt(self, name, data, path, collected_facts):
@@ -321,9 +338,12 @@ class DistributionFiles:
elif 'SteamOS' in data:
debian_facts['distribution'] = 'SteamOS'
# nothing else to do, SteamOS gets correct info from python functions
- elif path in ('/etc/lsb-release', '/etc/os-release') and 'Kali' in data:
- # Kali does not provide /etc/lsb-release anymore
- debian_facts['distribution'] = 'Kali'
+ elif path in ('/etc/lsb-release', '/etc/os-release') and ('Kali' in data or 'Parrot' in data):
+ if 'Kali' in data:
+ # Kali does not provide /etc/lsb-release anymore
+ debian_facts['distribution'] = 'Kali'
+ elif 'Parrot' in data:
+ debian_facts['distribution'] = 'Parrot'
release = re.search('DISTRIB_RELEASE=(.*)', data)
if release:
debian_facts['distribution_release'] = release.groups()[0]
@@ -437,6 +457,15 @@ class DistributionFiles:
clear_facts['distribution_release'] = release.groups()[0]
return True, clear_facts
+ def parse_distribution_file_CentOS(self, name, data, path, collected_facts):
+ centos_facts = {}
+
+ if 'CentOS Stream' in data:
+ centos_facts['distribution_release'] = 'Stream'
+ return True, centos_facts
+
+ return False, centos_facts
+
class Distribution(object):
"""
@@ -447,48 +476,14 @@ class Distribution(object):
This is unit tested. Please extend the tests to cover all distributions if you have them available.
"""
- # every distribution name mentioned here, must have one of
- # - allowempty == True
- # - be listed in SEARCH_STRING
- # - have a function get_distribution_DISTNAME implemented
- OSDIST_LIST = (
- {'path': '/etc/oracle-release', 'name': 'OracleLinux'},
- {'path': '/etc/slackware-version', 'name': 'Slackware'},
- {'path': '/etc/redhat-release', 'name': 'RedHat'},
- {'path': '/etc/vmware-release', 'name': 'VMwareESX', 'allowempty': True},
- {'path': '/etc/openwrt_release', 'name': 'OpenWrt'},
- {'path': '/etc/system-release', 'name': 'Amazon'},
- {'path': '/etc/alpine-release', 'name': 'Alpine'},
- {'path': '/etc/arch-release', 'name': 'Archlinux', 'allowempty': True},
- {'path': '/etc/os-release', 'name': 'SUSE'},
- {'path': '/etc/SuSE-release', 'name': 'SUSE'},
- {'path': '/etc/gentoo-release', 'name': 'Gentoo'},
- {'path': '/etc/os-release', 'name': 'Debian'},
- {'path': '/etc/lsb-release', 'name': 'Mandriva'},
- {'path': '/etc/altlinux-release', 'name': 'Altlinux'},
- {'path': '/etc/sourcemage-release', 'name': 'SMGL'},
- {'path': '/usr/lib/os-release', 'name': 'ClearLinux'},
- {'path': '/etc/coreos/update.conf', 'name': 'Coreos'},
- {'path': '/etc/flatcar/update.conf', 'name': 'Flatcar'},
- {'path': '/etc/os-release', 'name': 'NA'},
- )
-
- SEARCH_STRING = {
- 'OracleLinux': 'Oracle Linux',
- 'RedHat': 'Red Hat',
- 'Altlinux': 'ALT Linux',
- 'ClearLinux': 'Clear Linux Software for Intel Architecture',
- 'SMGL': 'Source Mage GNU/Linux',
- }
-
# keep keys in sync with Conditionals page of docs
OS_FAMILY_MAP = {'RedHat': ['RedHat', 'Fedora', 'CentOS', 'Scientific', 'SLC',
'Ascendos', 'CloudLinux', 'PSBM', 'OracleLinux', 'OVS',
'OEL', 'Amazon', 'Virtuozzo', 'XenServer', 'Alibaba',
- 'EulerOS', 'openEuler', 'AlmaLinux'],
+ 'EulerOS', 'openEuler', 'AlmaLinux', 'Rocky'],
'Debian': ['Debian', 'Ubuntu', 'Raspbian', 'Neon', 'KDE neon',
'Linux Mint', 'SteamOS', 'Devuan', 'Kali', 'Cumulus Linux',
- 'Pop!_OS', ],
+ 'Pop!_OS', 'Parrot', 'Pardus GNU/Linux'],
'Suse': ['SuSE', 'SLES', 'SLED', 'openSUSE', 'openSUSE Tumbleweed',
'SLES_SAP', 'SUSE_LINUX', 'openSUSE Leap'],
'Archlinux': ['Archlinux', 'Antergos', 'Manjaro'],
@@ -503,7 +498,9 @@ class Distribution(object):
'HP-UX': ['HPUX'],
'Darwin': ['MacOSX'],
'FreeBSD': ['FreeBSD', 'TrueOS'],
- 'ClearLinux': ['Clear Linux OS', 'Clear Linux Mix']}
+ 'ClearLinux': ['Clear Linux OS', 'Clear Linux Mix'],
+ 'DragonFly': ['DragonflyBSD', 'DragonFlyBSD', 'Gentoo/DragonflyBSD', 'Gentoo/DragonFlyBSD'],
+ 'NetBSD': ['NetBSD'], }
OS_FAMILY = {}
for family, names in OS_FAMILY_MAP.items():
@@ -581,7 +578,7 @@ class Distribution(object):
def get_distribution_FreeBSD(self):
freebsd_facts = {}
freebsd_facts['distribution_release'] = platform.release()
- data = re.search(r'(\d+)\.(\d+)-(RELEASE|STABLE|CURRENT).*', freebsd_facts['distribution_release'])
+ data = re.search(r'(\d+)\.(\d+)-(RELEASE|STABLE|CURRENT|RC|PRERELEASE).*', freebsd_facts['distribution_release'])
if 'trueos' in platform.version():
freebsd_facts['distribution'] = 'TrueOS'
if data:
@@ -601,13 +598,28 @@ class Distribution(object):
return openbsd_facts
def get_distribution_DragonFly(self):
- return {}
+ dragonfly_facts = {
+ 'distribution_release': platform.release()
+ }
+ rc, out, dummy = self.module.run_command("/sbin/sysctl -n kern.version")
+ match = re.search(r'v(\d+)\.(\d+)\.(\d+)-(RELEASE|STABLE|CURRENT).*', out)
+ if match:
+ dragonfly_facts['distribution_major_version'] = match.group(1)
+ dragonfly_facts['distribution_version'] = '%s.%s.%s' % match.groups()[:3]
+ return dragonfly_facts
def get_distribution_NetBSD(self):
netbsd_facts = {}
- # FIXME: poking at self.facts, should eventually make these each a collector
platform_release = platform.release()
- netbsd_facts['distribution_major_version'] = platform_release.split('.')[0]
+ netbsd_facts['distribution_release'] = platform_release
+ rc, out, dummy = self.module.run_command("/sbin/sysctl -n kern.version")
+ match = re.match(r'NetBSD\s(\d+)\.(\d+)\s\((GENERIC)\).*', out)
+ if match:
+ netbsd_facts['distribution_major_version'] = match.group(1)
+ netbsd_facts['distribution_version'] = '%s.%s' % match.groups()[:2]
+ else:
+ netbsd_facts['distribution_major_version'] = platform_release.split('.')[0]
+ netbsd_facts['distribution_version'] = platform_release
return netbsd_facts
def get_distribution_SMGL(self):
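
The Amazon branch now reads VERSION_ID when the matching file was /etc/os-release; a sketch of that parse against a hypothetical os-release fragment:

    import re

    data = 'NAME="Amazon Linux"\nVERSION_ID="2"\n'   # hypothetical /etc/os-release content
    match = re.search(r"VERSION_ID=\"(.*)\"", data)
    if match:
        version_data = match.group(1).split(".")
        if len(version_data) > 1:
            major, minor = version_data
        else:
            major, minor = version_data[0], 'NA'
    # -> major == '2', minor == 'NA'
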
diff --git a/lib/ansible/module_utils/facts/system/local.py b/lib/ansible/module_utils/facts/system/local.py
index fe33a323..e80fca9e 100644
--- a/lib/ansible/module_utils/facts/system/local.py
+++ b/lib/ansible/module_utils/facts/system/local.py
@@ -21,12 +21,10 @@ import json
import os
import stat
-from ansible.module_utils.six.moves import configparser
-from ansible.module_utils.six.moves import StringIO
-
+from ansible.module_utils._text import to_text
from ansible.module_utils.facts.utils import get_file_content
-
from ansible.module_utils.facts.collector import BaseFactCollector
+from ansible.module_utils.six.moves import configparser, StringIO
class LocalFactCollector(BaseFactCollector):
@@ -46,36 +44,47 @@ class LocalFactCollector(BaseFactCollector):
return local_facts
local = {}
+        # go over .fact files: run the executables, read the rest, and skip bad ones with a warning
for fn in sorted(glob.glob(fact_path + '/*.fact')):
- # where it will sit under local facts
+ # use filename for key where it will sit under local facts
fact_base = os.path.basename(fn).replace('.fact', '')
if stat.S_IXUSR & os.stat(fn)[stat.ST_MODE]:
- # run it
- # try to read it as json first
- # if that fails read it with ConfigParser
- # if that fails, skip it
+ failed = None
try:
+ # run it
rc, out, err = module.run_command(fn)
- except UnicodeError:
- fact = 'error loading fact - output of running %s was not utf-8' % fn
- local[fact_base] = fact
- local_facts['local'] = local
- module.warn(fact)
- return local_facts
+ if rc != 0:
+ failed = 'Failure executing fact script (%s), rc: %s, err: %s' % (fn, rc, err)
+ except (IOError, OSError) as e:
+ failed = 'Could not execute fact script (%s): %s' % (fn, to_text(e))
+
+ if failed is not None:
+ local[fact_base] = failed
+ module.warn(failed)
+ continue
else:
+ # ignores exceptions and returns empty
out = get_file_content(fn, default='')
- # load raw json
- fact = 'loading %s' % fact_base
+ try:
+ # ensure we have unicode
+ out = to_text(out, errors='surrogate_or_strict')
+ except UnicodeError:
+ fact = 'error loading fact - output of running "%s" was not utf-8' % fn
+ local[fact_base] = fact
+ module.warn(fact)
+ continue
+
+ # try to read it as json first
try:
fact = json.loads(out)
except ValueError:
- # load raw ini
+ # if that fails read it with ConfigParser
cp = configparser.ConfigParser()
try:
cp.readfp(StringIO(out))
except configparser.Error:
- fact = "error loading fact - please check content"
+ fact = "error loading facts as JSON or ini - please check content: %s" % fn
module.warn(fact)
else:
fact = {}
@@ -85,6 +94,9 @@ class LocalFactCollector(BaseFactCollector):
for opt in cp.options(sect):
val = cp.get(sect, opt)
fact[sect][opt] = val
+ except Exception as e:
+ fact = "Failed to convert (%s) to JSON: %s" % (fn, to_text(e))
+ module.warn(fact)
local[fact_base] = fact
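
Non-executable *.fact files are now parsed as JSON first, then fall back to ini parsing; a standalone sketch of that fallback using the stdlib configparser (the collector itself goes through six.moves):

    import configparser
    import json

    out = '[general]\nfoo=bar\n'   # hypothetical .fact file content
    try:
        fact = json.loads(out)
    except ValueError:
        cp = configparser.ConfigParser()
        cp.read_string(out)
        fact = {sect: dict(cp.items(sect)) for sect in cp.sections()}
    # -> {'general': {'foo': 'bar'}}
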
diff --git a/lib/ansible/module_utils/facts/system/pkg_mgr.py b/lib/ansible/module_utils/facts/system/pkg_mgr.py
index be9f029f..664c362e 100644
--- a/lib/ansible/module_utils/facts/system/pkg_mgr.py
+++ b/lib/ansible/module_utils/facts/system/pkg_mgr.py
@@ -25,6 +25,7 @@ PKG_MGRS = [{'path': '/usr/bin/yum', 'name': 'yum'},
{'path': '/opt/tools/bin/pkgin', 'name': 'pkgin'},
{'path': '/opt/local/bin/port', 'name': 'macports'},
{'path': '/usr/local/bin/brew', 'name': 'homebrew'},
+ {'path': '/opt/homebrew/bin/brew', 'name': 'homebrew'},
{'path': '/sbin/apk', 'name': 'apk'},
{'path': '/usr/sbin/pkg', 'name': 'pkgng'},
{'path': '/usr/sbin/swlist', 'name': 'swdepot'},
@@ -61,9 +62,10 @@ class PkgMgrFactCollector(BaseFactCollector):
required_facts = set(['distribution'])
def _check_rh_versions(self, pkg_mgr_name, collected_facts):
+ if os.path.exists('/run/ostree-booted'):
+ return "atomic_container"
+
if collected_facts['ansible_distribution'] == 'Fedora':
- if os.path.exists('/run/ostree-booted'):
- return "atomic_container"
try:
if int(collected_facts['ansible_distribution_major_version']) < 23:
for yum in [pkg_mgr for pkg_mgr in PKG_MGRS if pkg_mgr['name'] == 'yum']:
diff --git a/lib/ansible/module_utils/facts/system/selinux.py b/lib/ansible/module_utils/facts/system/selinux.py
index c3f88fa9..d3aa89de 100644
--- a/lib/ansible/module_utils/facts/system/selinux.py
+++ b/lib/ansible/module_utils/facts/system/selinux.py
@@ -21,7 +21,7 @@ __metaclass__ = type
from ansible.module_utils.facts.collector import BaseFactCollector
try:
- import selinux
+ from ansible.module_utils.compat import selinux
HAVE_SELINUX = True
except ImportError:
HAVE_SELINUX = False
diff --git a/lib/ansible/module_utils/facts/system/user.py b/lib/ansible/module_utils/facts/system/user.py
index 745b5db3..35a07759 100644
--- a/lib/ansible/module_utils/facts/system/user.py
+++ b/lib/ansible/module_utils/facts/system/user.py
@@ -35,7 +35,10 @@ class UserFactCollector(BaseFactCollector):
user_facts['user_id'] = getpass.getuser()
- pwent = pwd.getpwnam(getpass.getuser())
+ try:
+ pwent = pwd.getpwnam(getpass.getuser())
+ except KeyError:
+ pwent = pwd.getpwuid(os.getuid())
user_facts['user_uid'] = pwent.pw_uid
user_facts['user_gid'] = pwent.pw_gid
diff --git a/lib/ansible/module_utils/facts/timeout.py b/lib/ansible/module_utils/facts/timeout.py
index 934e7aff..ebb71cc6 100644
--- a/lib/ansible/module_utils/facts/timeout.py
+++ b/lib/ansible/module_utils/facts/timeout.py
@@ -50,6 +50,8 @@ def timeout(seconds=None, error_message="Timer expired"):
except multiprocessing.TimeoutError:
# This is an ansible.module_utils.common.facts.timeout.TimeoutError
raise TimeoutError('Timer expired after %s seconds' % timeout_value)
+ finally:
+ pool.terminate()
return wrapper
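
The added finally block makes sure the ThreadPool is torn down once the timeout fires instead of being left behind; the shape of that pattern in isolation (hypothetical 10-second job against a 0.1-second timeout):

    import multiprocessing
    import time
    from multiprocessing.pool import ThreadPool

    pool = ThreadPool(processes=1)
    res = pool.apply_async(time.sleep, (10,))
    try:
        res.get(timeout=0.1)
    except multiprocessing.TimeoutError:
        pass                  # the real code re-raises its own TimeoutError here
    finally:
        pool.terminate()      # clean up the pool instead of leaving it behind
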
diff --git a/lib/ansible/module_utils/facts/utils.py b/lib/ansible/module_utils/facts/utils.py
index 9fd00afd..08d59c03 100644
--- a/lib/ansible/module_utils/facts/utils.py
+++ b/lib/ansible/module_utils/facts/utils.py
@@ -16,26 +16,47 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
+import fcntl
import os
def get_file_content(path, default=None, strip=True):
+ '''
+ Return the contents of a given file path
+
+    :arg path: path to the file to return contents from
+    :arg default: value to return if we could not read the file
+    :arg strip: controls whether we strip whitespace from the result
+
+ :returns: String with file contents (optionally stripped) or 'default' value
+ '''
data = default
if os.path.exists(path) and os.access(path, os.R_OK):
try:
+ datafile = open(path)
try:
- datafile = open(path)
- data = datafile.read()
- if strip:
- data = data.strip()
- if len(data) == 0:
- data = default
- finally:
- datafile.close()
+                # try not to enter the kernel's 'block' mode, since blocking reads prevent timeouts from firing
+ fd = datafile.fileno()
+ flag = fcntl.fcntl(fd, fcntl.F_GETFL)
+ fcntl.fcntl(fd, fcntl.F_SETFL, flag | os.O_NONBLOCK)
+ except Exception:
+ pass # not required to operate, but would have been nice!
+
+ # actually read the data
+ data = datafile.read()
+
+ if strip:
+ data = data.strip()
+
+ if len(data) == 0:
+ data = default
+
except Exception:
- # ignore errors as some jails/containers might have readable permissions but not allow reads to proc
- # done in 2 blocks for 2.4 compat
+ # ignore errors as some jails/containers might have readable permissions but not allow reads
pass
+ finally:
+ datafile.close()
+
return data
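
The O_NONBLOCK tweak in get_file_content() is best-effort: it tries to keep reads of pseudo-files from blocking, and any failure to set the flag is simply ignored. The same pattern in isolation, reading /proc/uptime as a hypothetical Linux-only example:

    import fcntl
    import os

    with open('/proc/uptime') as datafile:
        try:
            fd = datafile.fileno()
            flag = fcntl.fcntl(fd, fcntl.F_GETFL)
            fcntl.fcntl(fd, fcntl.F_SETFL, flag | os.O_NONBLOCK)
        except Exception:
            pass              # optimization only; safe to skip if it fails
        data = datafile.read().strip()
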
diff --git a/lib/ansible/module_utils/facts/virtual/base.py b/lib/ansible/module_utils/facts/virtual/base.py
index 02da049e..cb725b6a 100644
--- a/lib/ansible/module_utils/facts/virtual/base.py
+++ b/lib/ansible/module_utils/facts/virtual/base.py
@@ -46,16 +46,24 @@ class Virtual:
return virtual_facts
def get_virtual_facts(self):
- virtual_facts = {'virtualization_type': '',
- 'virtualization_role': ''}
+ virtual_facts = {
+ 'virtualization_type': '',
+ 'virtualization_role': '',
+ 'virtualization_tech_guest': set(),
+ 'virtualization_tech_host': set(),
+ }
return virtual_facts
class VirtualCollector(BaseFactCollector):
name = 'virtual'
_fact_class = Virtual
- _fact_ids = set(['virtualization_type',
- 'virtualization_role'])
+ _fact_ids = set([
+ 'virtualization_type',
+ 'virtualization_role',
+ 'virtualization_tech_guest',
+ 'virtualization_tech_host',
+ ])
def collect(self, module=None, collected_facts=None):
collected_facts = collected_facts or {}
diff --git a/lib/ansible/module_utils/facts/virtual/freebsd.py b/lib/ansible/module_utils/facts/virtual/freebsd.py
index cfaf880e..7062d019 100644
--- a/lib/ansible/module_utils/facts/virtual/freebsd.py
+++ b/lib/ansible/module_utils/facts/virtual/freebsd.py
@@ -32,23 +32,45 @@ class FreeBSDVirtual(Virtual, VirtualSysctlDetectionMixin):
def get_virtual_facts(self):
virtual_facts = {}
+ host_tech = set()
+ guest_tech = set()
+
# Set empty values as default
virtual_facts['virtualization_type'] = ''
virtual_facts['virtualization_role'] = ''
if os.path.exists('/dev/xen/xenstore'):
+ guest_tech.add('xen')
virtual_facts['virtualization_type'] = 'xen'
virtual_facts['virtualization_role'] = 'guest'
+ kern_vm_guest = self.detect_virt_product('kern.vm_guest')
+ guest_tech.update(kern_vm_guest['virtualization_tech_guest'])
+ host_tech.update(kern_vm_guest['virtualization_tech_host'])
+
+ hw_hv_vendor = self.detect_virt_product('hw.hv_vendor')
+ guest_tech.update(hw_hv_vendor['virtualization_tech_guest'])
+ host_tech.update(hw_hv_vendor['virtualization_tech_host'])
+
+ sec_jail_jailed = self.detect_virt_product('security.jail.jailed')
+ guest_tech.update(sec_jail_jailed['virtualization_tech_guest'])
+ host_tech.update(sec_jail_jailed['virtualization_tech_host'])
+
if virtual_facts['virtualization_type'] == '':
- virtual_product_facts = self.detect_virt_product('kern.vm_guest') or self.detect_virt_product(
- 'hw.hv_vendor') or self.detect_virt_product('security.jail.jailed')
- virtual_facts.update(virtual_product_facts)
+ sysctl = kern_vm_guest or hw_hv_vendor or sec_jail_jailed
+ # We call update here, then re-set virtualization_tech_host/guest
+ # later.
+ virtual_facts.update(sysctl)
+
+ virtual_vendor_facts = self.detect_virt_vendor('hw.model')
+ guest_tech.update(virtual_vendor_facts['virtualization_tech_guest'])
+ host_tech.update(virtual_vendor_facts['virtualization_tech_host'])
if virtual_facts['virtualization_type'] == '':
- virtual_vendor_facts = self.detect_virt_vendor('hw.model')
virtual_facts.update(virtual_vendor_facts)
+ virtual_facts['virtualization_tech_guest'] = guest_tech
+ virtual_facts['virtualization_tech_host'] = host_tech
return virtual_facts
diff --git a/lib/ansible/module_utils/facts/virtual/hpux.py b/lib/ansible/module_utils/facts/virtual/hpux.py
index 94ea6a1a..10574827 100644
--- a/lib/ansible/module_utils/facts/virtual/hpux.py
+++ b/lib/ansible/module_utils/facts/virtual/hpux.py
@@ -32,28 +32,38 @@ class HPUXVirtual(Virtual):
def get_virtual_facts(self):
virtual_facts = {}
+ host_tech = set()
+ guest_tech = set()
+
if os.path.exists('/usr/sbin/vecheck'):
rc, out, err = self.module.run_command("/usr/sbin/vecheck")
if rc == 0:
+ guest_tech.add('HP vPar')
virtual_facts['virtualization_type'] = 'guest'
virtual_facts['virtualization_role'] = 'HP vPar'
if os.path.exists('/opt/hpvm/bin/hpvminfo'):
rc, out, err = self.module.run_command("/opt/hpvm/bin/hpvminfo")
if rc == 0 and re.match('.*Running.*HPVM vPar.*', out):
+ guest_tech.add('HPVM vPar')
virtual_facts['virtualization_type'] = 'guest'
virtual_facts['virtualization_role'] = 'HPVM vPar'
elif rc == 0 and re.match('.*Running.*HPVM guest.*', out):
+ guest_tech.add('HPVM IVM')
virtual_facts['virtualization_type'] = 'guest'
virtual_facts['virtualization_role'] = 'HPVM IVM'
elif rc == 0 and re.match('.*Running.*HPVM host.*', out):
+ guest_tech.add('HPVM')
virtual_facts['virtualization_type'] = 'host'
virtual_facts['virtualization_role'] = 'HPVM'
if os.path.exists('/usr/sbin/parstatus'):
rc, out, err = self.module.run_command("/usr/sbin/parstatus")
if rc == 0:
+ guest_tech.add('HP nPar')
virtual_facts['virtualization_type'] = 'guest'
virtual_facts['virtualization_role'] = 'HP nPar'
+ virtual_facts['virtualization_tech_guest'] = guest_tech
+ virtual_facts['virtualization_tech_host'] = host_tech
return virtual_facts
diff --git a/lib/ansible/module_utils/facts/virtual/linux.py b/lib/ansible/module_utils/facts/virtual/linux.py
index 5722e462..066cf5e1 100644
--- a/lib/ansible/module_utils/facts/virtual/linux.py
+++ b/lib/ansible/module_utils/facts/virtual/linux.py
@@ -35,141 +35,246 @@ class LinuxVirtual(Virtual):
# For more information, check: http://people.redhat.com/~rjones/virt-what/
def get_virtual_facts(self):
virtual_facts = {}
+
+ # We want to maintain compatibility with the old "virtualization_type"
+ # and "virtualization_role" entries, so we need to track if we found
+ # them. We won't return them until the end, but if we found them early,
+ # we should avoid updating them again.
+ found_virt = False
+
+ # But as we go along, we also want to track virt tech the new way.
+ host_tech = set()
+ guest_tech = set()
+
# lxc/docker
if os.path.exists('/proc/1/cgroup'):
for line in get_file_lines('/proc/1/cgroup'):
if re.search(r'/docker(/|-[0-9a-f]+\.scope)', line):
- virtual_facts['virtualization_type'] = 'docker'
- virtual_facts['virtualization_role'] = 'guest'
- return virtual_facts
+ guest_tech.add('docker')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'docker'
+ virtual_facts['virtualization_role'] = 'guest'
+ found_virt = True
if re.search('/lxc/', line) or re.search('/machine.slice/machine-lxc', line):
- virtual_facts['virtualization_type'] = 'lxc'
- virtual_facts['virtualization_role'] = 'guest'
- return virtual_facts
+ guest_tech.add('lxc')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'lxc'
+ virtual_facts['virtualization_role'] = 'guest'
+ found_virt = True
+ if re.search('/system.slice/containerd.service', line):
+ guest_tech.add('containerd')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'containerd'
+ virtual_facts['virtualization_role'] = 'guest'
+ found_virt = True
# lxc does not always appear in cgroups anymore but sets 'container=lxc' environment var, requires root privs
if os.path.exists('/proc/1/environ'):
for line in get_file_lines('/proc/1/environ', line_sep='\x00'):
if re.search('container=lxc', line):
- virtual_facts['virtualization_type'] = 'lxc'
- virtual_facts['virtualization_role'] = 'guest'
- return virtual_facts
+ guest_tech.add('lxc')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'lxc'
+ virtual_facts['virtualization_role'] = 'guest'
+ found_virt = True
if re.search('container=podman', line):
- virtual_facts['virtualization_type'] = 'podman'
- virtual_facts['virtualization_role'] = 'guest'
- return virtual_facts
+ guest_tech.add('podman')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'podman'
+ virtual_facts['virtualization_role'] = 'guest'
+ found_virt = True
if re.search('^container=.', line):
- virtual_facts['virtualization_type'] = 'container'
- virtual_facts['virtualization_role'] = 'guest'
- return virtual_facts
+ guest_tech.add('container')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'container'
+ virtual_facts['virtualization_role'] = 'guest'
+ found_virt = True
if os.path.exists('/proc/vz') and not os.path.exists('/proc/lve'):
virtual_facts['virtualization_type'] = 'openvz'
if os.path.exists('/proc/bc'):
- virtual_facts['virtualization_role'] = 'host'
+ host_tech.add('openvz')
+ if not found_virt:
+ virtual_facts['virtualization_role'] = 'host'
else:
- virtual_facts['virtualization_role'] = 'guest'
- return virtual_facts
+ guest_tech.add('openvz')
+ if not found_virt:
+ virtual_facts['virtualization_role'] = 'guest'
+ found_virt = True
systemd_container = get_file_content('/run/systemd/container')
if systemd_container:
- virtual_facts['virtualization_type'] = systemd_container
- virtual_facts['virtualization_role'] = 'guest'
- return virtual_facts
+ guest_tech.add(systemd_container)
+ if not found_virt:
+ virtual_facts['virtualization_type'] = systemd_container
+ virtual_facts['virtualization_role'] = 'guest'
+ found_virt = True
+
+ # ensure 'container' guest_tech is appropriately set
+ if guest_tech.intersection(set(['docker', 'lxc', 'podman', 'openvz', 'containerd'])) or systemd_container:
+ guest_tech.add('container')
if os.path.exists("/proc/xen"):
- virtual_facts['virtualization_type'] = 'xen'
- virtual_facts['virtualization_role'] = 'guest'
+ is_xen_host = False
try:
for line in get_file_lines('/proc/xen/capabilities'):
if "control_d" in line:
- virtual_facts['virtualization_role'] = 'host'
+ is_xen_host = True
except IOError:
pass
- return virtual_facts
+
+ if is_xen_host:
+ host_tech.add('xen')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'xen'
+ virtual_facts['virtualization_role'] = 'host'
+ else:
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'xen'
+ virtual_facts['virtualization_role'] = 'guest'
+ found_virt = True
# assume guest for this block
- virtual_facts['virtualization_role'] = 'guest'
+ if not found_virt:
+ virtual_facts['virtualization_role'] = 'guest'
product_name = get_file_content('/sys/devices/virtual/dmi/id/product_name')
+ sys_vendor = get_file_content('/sys/devices/virtual/dmi/id/sys_vendor')
+ product_family = get_file_content('/sys/devices/virtual/dmi/id/product_family')
if product_name in ('KVM', 'KVM Server', 'Bochs', 'AHV'):
- virtual_facts['virtualization_type'] = 'kvm'
- return virtual_facts
-
- if product_name == 'RHEV Hypervisor':
- virtual_facts['virtualization_type'] = 'RHEV'
- return virtual_facts
+ guest_tech.add('kvm')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'kvm'
+ found_virt = True
+
+ if sys_vendor == 'oVirt':
+ guest_tech.add('oVirt')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'oVirt'
+ found_virt = True
+
+ if sys_vendor == 'Red Hat':
+ if product_family == 'RHV':
+ guest_tech.add('RHV')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'RHV'
+ found_virt = True
+ elif product_name == 'RHEV Hypervisor':
+ guest_tech.add('RHEV')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'RHEV'
+ found_virt = True
if product_name in ('VMware Virtual Platform', 'VMware7,1'):
- virtual_facts['virtualization_type'] = 'VMware'
- return virtual_facts
+ guest_tech.add('VMware')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'VMware'
+ found_virt = True
if product_name in ('OpenStack Compute', 'OpenStack Nova'):
- virtual_facts['virtualization_type'] = 'openstack'
- return virtual_facts
+ guest_tech.add('openstack')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'openstack'
+ found_virt = True
bios_vendor = get_file_content('/sys/devices/virtual/dmi/id/bios_vendor')
if bios_vendor == 'Xen':
- virtual_facts['virtualization_type'] = 'xen'
- return virtual_facts
+ guest_tech.add('xen')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'xen'
+ found_virt = True
if bios_vendor == 'innotek GmbH':
- virtual_facts['virtualization_type'] = 'virtualbox'
- return virtual_facts
+ guest_tech.add('virtualbox')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'virtualbox'
+ found_virt = True
if bios_vendor in ('Amazon EC2', 'DigitalOcean', 'Hetzner'):
- virtual_facts['virtualization_type'] = 'kvm'
- return virtual_facts
-
- sys_vendor = get_file_content('/sys/devices/virtual/dmi/id/sys_vendor')
+ guest_tech.add('kvm')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'kvm'
+ found_virt = True
- KVM_SYS_VENDORS = ('QEMU', 'oVirt', 'Amazon EC2', 'DigitalOcean', 'Google', 'Scaleway', 'Nutanix')
+ KVM_SYS_VENDORS = ('QEMU', 'Amazon EC2', 'DigitalOcean', 'Google', 'Scaleway', 'Nutanix')
if sys_vendor in KVM_SYS_VENDORS:
- virtual_facts['virtualization_type'] = 'kvm'
- return virtual_facts
+ guest_tech.add('kvm')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'kvm'
+ found_virt = True
+
+ if sys_vendor == 'KubeVirt':
+ guest_tech.add('KubeVirt')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'KubeVirt'
+ found_virt = True
# FIXME: This does also match hyperv
if sys_vendor == 'Microsoft Corporation':
- virtual_facts['virtualization_type'] = 'VirtualPC'
- return virtual_facts
+ guest_tech.add('VirtualPC')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'VirtualPC'
+ found_virt = True
if sys_vendor == 'Parallels Software International Inc.':
- virtual_facts['virtualization_type'] = 'parallels'
- return virtual_facts
+ guest_tech.add('parallels')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'parallels'
+ found_virt = True
if sys_vendor == 'OpenStack Foundation':
- virtual_facts['virtualization_type'] = 'openstack'
- return virtual_facts
+ guest_tech.add('openstack')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'openstack'
+ found_virt = True
# unassume guest
- del virtual_facts['virtualization_role']
+ if not found_virt:
+ del virtual_facts['virtualization_role']
if os.path.exists('/proc/self/status'):
for line in get_file_lines('/proc/self/status'):
if re.match(r'^VxID:\s+\d+', line):
- virtual_facts['virtualization_type'] = 'linux_vserver'
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'linux_vserver'
if re.match(r'^VxID:\s+0', line):
- virtual_facts['virtualization_role'] = 'host'
+ host_tech.add('linux_vserver')
+ if not found_virt:
+ virtual_facts['virtualization_role'] = 'host'
else:
- virtual_facts['virtualization_role'] = 'guest'
- return virtual_facts
+ guest_tech.add('linux_vserver')
+ if not found_virt:
+ virtual_facts['virtualization_role'] = 'guest'
+ found_virt = True
if os.path.exists('/proc/cpuinfo'):
for line in get_file_lines('/proc/cpuinfo'):
if re.match('^model name.*QEMU Virtual CPU', line):
- virtual_facts['virtualization_type'] = 'kvm'
+ guest_tech.add('kvm')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'kvm'
elif re.match('^vendor_id.*User Mode Linux', line):
- virtual_facts['virtualization_type'] = 'uml'
+ guest_tech.add('uml')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'uml'
elif re.match('^model name.*UML', line):
- virtual_facts['virtualization_type'] = 'uml'
+ guest_tech.add('uml')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'uml'
elif re.match('^machine.*CHRP IBM pSeries .emulated by qemu.', line):
- virtual_facts['virtualization_type'] = 'kvm'
+ guest_tech.add('kvm')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'kvm'
elif re.match('^vendor_id.*PowerVM Lx86', line):
- virtual_facts['virtualization_type'] = 'powervm_lx86'
+ guest_tech.add('powervm_lx86')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'powervm_lx86'
elif re.match('^vendor_id.*IBM/S390', line):
- virtual_facts['virtualization_type'] = 'PR/SM'
+ guest_tech.add('PR/SM')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'PR/SM'
lscpu = self.module.get_bin_path('lscpu')
if lscpu:
rc, out, err = self.module.run_command(["lscpu"])
@@ -178,16 +283,24 @@ class LinuxVirtual(Virtual):
data = line.split(":", 1)
key = data[0].strip()
if key == 'Hypervisor':
- virtual_facts['virtualization_type'] = data[1].strip()
+ tech = data[1].strip()
+ guest_tech.add(tech)
+ if not found_virt:
+ virtual_facts['virtualization_type'] = tech
else:
- virtual_facts['virtualization_type'] = 'ibm_systemz'
+ guest_tech.add('ibm_systemz')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'ibm_systemz'
else:
continue
if virtual_facts['virtualization_type'] == 'PR/SM':
- virtual_facts['virtualization_role'] = 'LPAR'
+ if not found_virt:
+ virtual_facts['virtualization_role'] = 'LPAR'
else:
- virtual_facts['virtualization_role'] = 'guest'
- return virtual_facts
+ if not found_virt:
+ virtual_facts['virtualization_role'] = 'guest'
+ if not found_virt:
+ found_virt = True
# Beware that we can have both kvm and virtualbox running on a single system
if os.path.exists("/proc/modules") and os.access('/proc/modules', os.R_OK):
@@ -197,8 +310,10 @@ class LinuxVirtual(Virtual):
modules.append(data[0])
if 'kvm' in modules:
- virtual_facts['virtualization_type'] = 'kvm'
- virtual_facts['virtualization_role'] = 'host'
+ host_tech.add('kvm')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'kvm'
+ virtual_facts['virtualization_role'] = 'host'
if os.path.isdir('/rhev/'):
# Check whether this is a RHEV hypervisor (is vdsm running ?)
@@ -206,23 +321,32 @@ class LinuxVirtual(Virtual):
try:
with open(f) as virt_fh:
comm_content = virt_fh.read().rstrip()
- if comm_content == 'vdsm':
- virtual_facts['virtualization_type'] = 'RHEV'
+
+ if comm_content in ('vdsm', 'vdsmd'):
+ # We add both kvm and RHEV to host_tech in this case.
+ # It's accurate. RHEV uses KVM.
+ host_tech.add('RHEV')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'RHEV'
break
except Exception:
pass
- return virtual_facts
+ found_virt = True
if 'vboxdrv' in modules:
- virtual_facts['virtualization_type'] = 'virtualbox'
- virtual_facts['virtualization_role'] = 'host'
- return virtual_facts
+ host_tech.add('virtualbox')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'virtualbox'
+ virtual_facts['virtualization_role'] = 'host'
+ found_virt = True
if 'virtio' in modules:
- virtual_facts['virtualization_type'] = 'kvm'
- virtual_facts['virtualization_role'] = 'guest'
- return virtual_facts
+ host_tech.add('kvm')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'kvm'
+ virtual_facts['virtualization_role'] = 'guest'
+ found_virt = True
# In older Linux Kernel versions, /sys filesystem is not available
# dmidecode is the safest option to parse virtualization related values
@@ -234,20 +358,35 @@ class LinuxVirtual(Virtual):
# Strip out commented lines (specific dmidecode output)
vendor_name = ''.join([line.strip() for line in out.splitlines() if not line.startswith('#')])
if vendor_name.startswith('VMware'):
- virtual_facts['virtualization_type'] = 'VMware'
- virtual_facts['virtualization_role'] = 'guest'
- return virtual_facts
+ guest_tech.add('VMware')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'VMware'
+ virtual_facts['virtualization_role'] = 'guest'
+ found_virt = True
if 'BHYVE' in out:
- virtual_facts['virtualization_type'] = 'bhyve'
- virtual_facts['virtualization_role'] = 'guest'
- return virtual_facts
+ guest_tech.add('bhyve')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'bhyve'
+ virtual_facts['virtualization_role'] = 'guest'
+ found_virt = True
+
+ if os.path.exists('/dev/kvm'):
+ host_tech.add('kvm')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'kvm'
+ virtual_facts['virtualization_role'] = 'host'
+ found_virt = True
# If none of the above matches, return 'NA' for virtualization_type
# and virtualization_role. This allows for proper grouping.
- virtual_facts['virtualization_type'] = 'NA'
- virtual_facts['virtualization_role'] = 'NA'
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'NA'
+ virtual_facts['virtualization_role'] = 'NA'
+ found_virt = True
+ virtual_facts['virtualization_tech_guest'] = guest_tech
+ virtual_facts['virtualization_tech_host'] = host_tech
return virtual_facts
diff --git a/lib/ansible/module_utils/facts/virtual/netbsd.py b/lib/ansible/module_utils/facts/virtual/netbsd.py
index 514ef859..b4ef14ed 100644
--- a/lib/ansible/module_utils/facts/virtual/netbsd.py
+++ b/lib/ansible/module_utils/facts/virtual/netbsd.py
@@ -27,21 +27,44 @@ class NetBSDVirtual(Virtual, VirtualSysctlDetectionMixin):
def get_virtual_facts(self):
virtual_facts = {}
+ host_tech = set()
+ guest_tech = set()
+
# Set empty values as default
virtual_facts['virtualization_type'] = ''
virtual_facts['virtualization_role'] = ''
virtual_product_facts = self.detect_virt_product('machdep.dmi.system-product')
+ guest_tech.update(virtual_product_facts['virtualization_tech_guest'])
+ host_tech.update(virtual_product_facts['virtualization_tech_host'])
virtual_facts.update(virtual_product_facts)
+ virtual_vendor_facts = self.detect_virt_vendor('machdep.dmi.system-vendor')
+ guest_tech.update(virtual_vendor_facts['virtualization_tech_guest'])
+ host_tech.update(virtual_vendor_facts['virtualization_tech_host'])
+
+ if virtual_facts['virtualization_type'] == '':
+ virtual_facts.update(virtual_vendor_facts)
+
+ # The above logic is tried first for backwards compatibility. If
+ # something above matches, use it. Otherwise if the result is still
+ # empty, try machdep.hypervisor.
+ virtual_vendor_facts = self.detect_virt_vendor('machdep.hypervisor')
+ guest_tech.update(virtual_vendor_facts['virtualization_tech_guest'])
+ host_tech.update(virtual_vendor_facts['virtualization_tech_host'])
+
if virtual_facts['virtualization_type'] == '':
- virtual_vendor_facts = self.detect_virt_vendor('machdep.dmi.system-vendor')
virtual_facts.update(virtual_vendor_facts)
if os.path.exists('/dev/xencons'):
- virtual_facts['virtualization_type'] = 'xen'
- virtual_facts['virtualization_role'] = 'guest'
+ guest_tech.add('xen')
+
+ if virtual_facts['virtualization_type'] == '':
+ virtual_facts['virtualization_type'] = 'xen'
+ virtual_facts['virtualization_role'] = 'guest'
+ virtual_facts['virtualization_tech_guest'] = guest_tech
+ virtual_facts['virtualization_tech_host'] = host_tech
return virtual_facts
diff --git a/lib/ansible/module_utils/facts/virtual/openbsd.py b/lib/ansible/module_utils/facts/virtual/openbsd.py
index 42daa337..c449028d 100644
--- a/lib/ansible/module_utils/facts/virtual/openbsd.py
+++ b/lib/ansible/module_utils/facts/virtual/openbsd.py
@@ -35,16 +35,23 @@ class OpenBSDVirtual(Virtual, VirtualSysctlDetectionMixin):
def get_virtual_facts(self):
virtual_facts = {}
+ host_tech = set()
+ guest_tech = set()
# Set empty values as default
virtual_facts['virtualization_type'] = ''
virtual_facts['virtualization_role'] = ''
virtual_product_facts = self.detect_virt_product('hw.product')
+ guest_tech.update(virtual_product_facts['virtualization_tech_guest'])
+ host_tech.update(virtual_product_facts['virtualization_tech_host'])
virtual_facts.update(virtual_product_facts)
+ virtual_vendor_facts = self.detect_virt_vendor('hw.vendor')
+ guest_tech.update(virtual_vendor_facts['virtualization_tech_guest'])
+ host_tech.update(virtual_vendor_facts['virtualization_tech_host'])
+
if virtual_facts['virtualization_type'] == '':
- virtual_vendor_facts = self.detect_virt_vendor('hw.vendor')
virtual_facts.update(virtual_vendor_facts)
# Check the dmesg if vmm(4) attached, indicating the host is
@@ -53,9 +60,12 @@ class OpenBSDVirtual(Virtual, VirtualSysctlDetectionMixin):
for line in dmesg_boot.splitlines():
match = re.match('^vmm0 at mainbus0: (SVM/RVI|VMX/EPT)$', line)
if match:
+ host_tech.add('vmm')
virtual_facts['virtualization_type'] = 'vmm'
virtual_facts['virtualization_role'] = 'host'
+ virtual_facts['virtualization_tech_guest'] = guest_tech
+ virtual_facts['virtualization_tech_host'] = host_tech
return virtual_facts
diff --git a/lib/ansible/module_utils/facts/virtual/sunos.py b/lib/ansible/module_utils/facts/virtual/sunos.py
index 06ce661a..1e92677e 100644
--- a/lib/ansible/module_utils/facts/virtual/sunos.py
+++ b/lib/ansible/module_utils/facts/virtual/sunos.py
@@ -32,19 +32,27 @@ class SunOSVirtual(Virtual):
def get_virtual_facts(self):
virtual_facts = {}
- # Check if it's a zone
+ host_tech = set()
+ guest_tech = set()
+ # Check if it's a zone
zonename = self.module.get_bin_path('zonename')
if zonename:
rc, out, err = self.module.run_command(zonename)
- if rc == 0 and out.rstrip() != "global":
- virtual_facts['container'] = 'zone'
+ if rc == 0:
+ if out.rstrip() == "global":
+ host_tech.add('zone')
+ else:
+ guest_tech.add('zone')
+ virtual_facts['container'] = 'zone'
+
# Check if it's a branded zone (i.e. Solaris 8/9 zone)
if os.path.isdir('/.SUNWnative'):
+ guest_tech.add('zone')
virtual_facts['container'] = 'zone'
+
# If it's a zone check if we can detect if our global zone is itself virtualized.
# Relies on the "guest tools" (e.g. vmware tools) to be installed
-
if 'container' in virtual_facts and virtual_facts['container'] == 'zone':
modinfo = self.module.get_bin_path('modinfo')
if modinfo:
@@ -52,13 +60,16 @@ class SunOSVirtual(Virtual):
if rc == 0:
for line in out.splitlines():
if 'VMware' in line:
+ guest_tech.add('vmware')
virtual_facts['virtualization_type'] = 'vmware'
virtual_facts['virtualization_role'] = 'guest'
if 'VirtualBox' in line:
+ guest_tech.add('virtualbox')
virtual_facts['virtualization_type'] = 'virtualbox'
virtual_facts['virtualization_role'] = 'guest'
if os.path.exists('/proc/vz'):
+ guest_tech.add('virtuozzo')
virtual_facts['virtualization_type'] = 'virtuozzo'
virtual_facts['virtualization_role'] = 'guest'
@@ -77,6 +88,7 @@ class SunOSVirtual(Virtual):
for line in out.splitlines():
fields = line.split('|')
if fields[0] == 'DOMAINROLE' and fields[1] == 'impl=LDoms':
+ guest_tech.add('ldom')
virtual_facts['virtualization_type'] = 'ldom'
virtual_facts['virtualization_role'] = 'guest'
hostfeatures = []
@@ -97,21 +109,28 @@ class SunOSVirtual(Virtual):
if rc == 0:
for line in out.splitlines():
if 'VMware' in line:
+ guest_tech.add('vmware')
virtual_facts['virtualization_type'] = 'vmware'
virtual_facts['virtualization_role'] = 'guest'
elif 'Parallels' in line:
+ guest_tech.add('parallels')
virtual_facts['virtualization_type'] = 'parallels'
virtual_facts['virtualization_role'] = 'guest'
elif 'VirtualBox' in line:
+ guest_tech.add('virtualbox')
virtual_facts['virtualization_type'] = 'virtualbox'
virtual_facts['virtualization_role'] = 'guest'
elif 'HVM domU' in line:
+ guest_tech.add('xen')
virtual_facts['virtualization_type'] = 'xen'
virtual_facts['virtualization_role'] = 'guest'
elif 'KVM' in line:
+ guest_tech.add('kvm')
virtual_facts['virtualization_type'] = 'kvm'
virtual_facts['virtualization_role'] = 'guest'
+ virtual_facts['virtualization_tech_guest'] = guest_tech
+ virtual_facts['virtualization_tech_host'] = host_tech
return virtual_facts
diff --git a/lib/ansible/module_utils/facts/virtual/sysctl.py b/lib/ansible/module_utils/facts/virtual/sysctl.py
index a159cc15..1c7b2b34 100644
--- a/lib/ansible/module_utils/facts/virtual/sysctl.py
+++ b/lib/ansible/module_utils/facts/virtual/sysctl.py
@@ -25,45 +25,88 @@ class VirtualSysctlDetectionMixin(object):
def detect_virt_product(self, key):
virtual_product_facts = {}
+ host_tech = set()
+ guest_tech = set()
+
+        # Similar to what we do in linux.py -- we want to allow multiple
+        # virt techs to show up while maintaining compatibility, so we track
+        # where we would previously have stopped, even though now we go through everything.
+ found_virt = False
+
self.detect_sysctl()
if self.sysctl_path:
rc, out, err = self.module.run_command("%s -n %s" % (self.sysctl_path, key))
if rc == 0:
if re.match('(KVM|kvm|Bochs|SmartDC).*', out):
- virtual_product_facts['virtualization_type'] = 'kvm'
- virtual_product_facts['virtualization_role'] = 'guest'
- elif re.match('.*VMware.*', out):
- virtual_product_facts['virtualization_type'] = 'VMware'
- virtual_product_facts['virtualization_role'] = 'guest'
- elif out.rstrip() == 'VirtualBox':
- virtual_product_facts['virtualization_type'] = 'virtualbox'
- virtual_product_facts['virtualization_role'] = 'guest'
- elif out.rstrip() == 'HVM domU':
- virtual_product_facts['virtualization_type'] = 'xen'
- virtual_product_facts['virtualization_role'] = 'guest'
- elif out.rstrip() == 'Parallels':
- virtual_product_facts['virtualization_type'] = 'parallels'
- virtual_product_facts['virtualization_role'] = 'guest'
- elif out.rstrip() == 'RHEV Hypervisor':
- virtual_product_facts['virtualization_type'] = 'RHEV'
- virtual_product_facts['virtualization_role'] = 'guest'
- elif (key == 'security.jail.jailed') and (out.rstrip() == '1'):
- virtual_product_facts['virtualization_type'] = 'jails'
- virtual_product_facts['virtualization_role'] = 'guest'
+ guest_tech.add('kvm')
+ if not found_virt:
+ virtual_product_facts['virtualization_type'] = 'kvm'
+ virtual_product_facts['virtualization_role'] = 'guest'
+ found_virt = True
+ if re.match('.*VMware.*', out):
+ guest_tech.add('VMware')
+ if not found_virt:
+ virtual_product_facts['virtualization_type'] = 'VMware'
+ virtual_product_facts['virtualization_role'] = 'guest'
+ found_virt = True
+ if out.rstrip() == 'VirtualBox':
+ guest_tech.add('virtualbox')
+ if not found_virt:
+ virtual_product_facts['virtualization_type'] = 'virtualbox'
+ virtual_product_facts['virtualization_role'] = 'guest'
+ found_virt = True
+ if re.match('(HVM domU|XenPVH|XenPV|XenPVHVM).*', out):
+ guest_tech.add('xen')
+ if not found_virt:
+ virtual_product_facts['virtualization_type'] = 'xen'
+ virtual_product_facts['virtualization_role'] = 'guest'
+ found_virt = True
+ if out.rstrip() == 'Hyper-V':
+ guest_tech.add('Hyper-V')
+ if not found_virt:
+ virtual_product_facts['virtualization_type'] = 'Hyper-V'
+ virtual_product_facts['virtualization_role'] = 'guest'
+ found_virt = True
+ if out.rstrip() == 'Parallels':
+ guest_tech.add('parallels')
+ if not found_virt:
+ virtual_product_facts['virtualization_type'] = 'parallels'
+ virtual_product_facts['virtualization_role'] = 'guest'
+ found_virt = True
+ if out.rstrip() == 'RHEV Hypervisor':
+ guest_tech.add('RHEV')
+ if not found_virt:
+ virtual_product_facts['virtualization_type'] = 'RHEV'
+ virtual_product_facts['virtualization_role'] = 'guest'
+ found_virt = True
+ if (key == 'security.jail.jailed') and (out.rstrip() == '1'):
+ guest_tech.add('jails')
+ if not found_virt:
+ virtual_product_facts['virtualization_type'] = 'jails'
+ virtual_product_facts['virtualization_role'] = 'guest'
+ found_virt = True
+ virtual_product_facts['virtualization_tech_guest'] = guest_tech
+ virtual_product_facts['virtualization_tech_host'] = host_tech
return virtual_product_facts
def detect_virt_vendor(self, key):
virtual_vendor_facts = {}
+ host_tech = set()
+ guest_tech = set()
self.detect_sysctl()
if self.sysctl_path:
rc, out, err = self.module.run_command("%s -n %s" % (self.sysctl_path, key))
if rc == 0:
if out.rstrip() == 'QEMU':
+ guest_tech.add('kvm')
virtual_vendor_facts['virtualization_type'] = 'kvm'
virtual_vendor_facts['virtualization_role'] = 'guest'
if out.rstrip() == 'OpenBSD':
+ guest_tech.add('vmm')
virtual_vendor_facts['virtualization_type'] = 'vmm'
virtual_vendor_facts['virtualization_role'] = 'guest'
+ virtual_vendor_facts['virtualization_tech_guest'] = guest_tech
+ virtual_vendor_facts['virtualization_tech_host'] = host_tech
return virtual_vendor_facts
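For illustration, a minimal standalone sketch (not part of this patch) of the detection pattern the hunks above converge on: every matching technology is accumulated in a set, while the legacy virtualization_type/virtualization_role keys keep only the first hit, guarded by a found_virt flag. The pattern table below is hypothetical.

import re

def detect_virt(product_string):
    facts = {}
    guest_tech = set()
    found_virt = False  # marks where the old elif chain would have stopped

    # hypothetical pattern table for the sketch
    checks = [
        (r'(KVM|kvm|Bochs|SmartDC)', 'kvm'),
        (r'VMware', 'VMware'),
        (r'VirtualBox', 'virtualbox'),
    ]
    for pattern, tech in checks:
        if re.search(pattern, product_string):
            guest_tech.add(tech)             # always record the technology
            if not found_virt:               # only the first hit sets the legacy keys
                facts['virtualization_type'] = tech
                facts['virtualization_role'] = 'guest'
                found_virt = True

    facts['virtualization_tech_guest'] = guest_tech
    facts['virtualization_tech_host'] = set()
    return facts

print(detect_virt('KVM running under VMware'))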
diff --git a/lib/ansible/module_utils/json_utils.py b/lib/ansible/module_utils/json_utils.py
index d5639fa3..0e95aa67 100644
--- a/lib/ansible/module_utils/json_utils.py
+++ b/lib/ansible/module_utils/json_utils.py
@@ -32,7 +32,7 @@ import json
# NB: a copy of this function exists in ../../modules/core/async_wrapper.py. Ensure any
# changes are propagated there.
-def _filter_non_json_lines(data):
+def _filter_non_json_lines(data, objects_only=False):
'''
Used to filter unrelated output around module JSON output, like messages from
tcagetattr, or where dropbear spews MOTD on every single command (which is nuts).
@@ -50,7 +50,7 @@ def _filter_non_json_lines(data):
if line.startswith(u'{'):
endchar = u'}'
break
- elif line.startswith(u'['):
+ elif not objects_only and line.startswith(u'['):
endchar = u']'
break
else:
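A simplified stand-in for what the new objects_only switch does: when it is set, a line starting with '[' no longer counts as the beginning of module output, so only a JSON object is accepted. The real function additionally collects warnings from the filtered lines; this sketch omits that.

def filter_non_json_lines(data, objects_only=False):
    # simplified stand-in for _filter_non_json_lines(); warning collection omitted
    lines = data.splitlines()
    for start, line in enumerate(lines):
        line = line.strip()
        if line.startswith(u'{'):
            endchar = u'}'
            break
        elif not objects_only and line.startswith(u'['):
            endchar = u']'
            break
    else:
        raise ValueError('No start of json char found')

    lines = lines[start:]
    for reverse_end_offset, line in enumerate(reversed(lines)):
        if line.strip().endswith(endchar):
            break
    else:
        raise ValueError('No end of json char found')

    return u'\n'.join(lines[:len(lines) - reverse_end_offset])

print(filter_non_json_lines(u'motd noise\n{"changed": false}\ntrailing noise'))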
diff --git a/lib/ansible/module_utils/six/__init__.py b/lib/ansible/module_utils/six/__init__.py
index d2d9a45a..9df35d54 100644
--- a/lib/ansible/module_utils/six/__init__.py
+++ b/lib/ansible/module_utils/six/__init__.py
@@ -3,7 +3,7 @@
# upstream vendored file that we're not going to modify on our own
# pylint: disable=undefined-variable
-# Copyright (c) 2010-2018 Benjamin Peterson
+# Copyright (c) 2010-2019 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
@@ -35,10 +35,11 @@ import types
# The following makes it easier for us to script updates of the bundled code. It is not part of
# upstream six
-_BUNDLED_METADATA = {"pypi_name": "six", "version": "1.12.0"}
+# CANT_UPDATE due to py2.6 drop: https://github.com/benjaminp/six/pull/314
+_BUNDLED_METADATA = {"pypi_name": "six", "version": "1.13.0"}
__author__ = "Benjamin Peterson <benjamin@python.org>"
-__version__ = "1.12.0"
+__version__ = "1.13.0"
# Useful for very coarse version differentiation.
@@ -265,8 +266,10 @@ _moved_attributes = [
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
+ MovedModule("collections_abc", "collections", "collections.abc" if sys.version_info >= (3, 3) else "collections"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
+ MovedModule("dbm_ndbm", "dbm", "dbm.ndbm"),
MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
@@ -648,6 +651,7 @@ if PY3:
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
+ del io
_assertCountEqual = "assertCountEqual"
if sys.version_info[1] <= 1:
_assertRaisesRegex = "assertRaisesRegexp"
@@ -835,7 +839,15 @@ def with_metaclass(meta, *bases):
class metaclass(type):
def __new__(cls, name, this_bases, d):
- return meta(name, bases, d)
+ if sys.version_info[:2] >= (3, 7):
+ # This version introduced PEP 560 that requires a bit
+ # of extra care (we mimic what is done by __build_class__).
+ resolved_bases = types.resolve_bases(bases)
+ if resolved_bases is not bases:
+ d['__orig_bases__'] = bases
+ else:
+ resolved_bases = bases
+ return meta(name, resolved_bases, d)
@classmethod
def __prepare__(cls, name, this_bases):
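A small hedged usage example of six.with_metaclass(), which the PEP 560 handling above keeps working on Python 3.7+; the Meta and Widget names are invented for the example.

from ansible.module_utils.six import with_metaclass

class Meta(type):
    # invented metaclass that tags every class it creates
    def __new__(mcls, name, bases, namespace):
        namespace['created_by_meta'] = True
        return super(Meta, mcls).__new__(mcls, name, bases, namespace)

class Base(object):
    pass

class Widget(with_metaclass(Meta, Base)):
    pass

assert Widget.created_by_meta and issubclass(Widget, Base)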
diff --git a/lib/ansible/module_utils/urls.py b/lib/ansible/module_utils/urls.py
index 2502df09..632387f6 100644
--- a/lib/ansible/module_utils/urls.py
+++ b/lib/ansible/module_utils/urls.py
@@ -74,17 +74,17 @@ import ansible.module_utils.six.moves.urllib.error as urllib_error
from ansible.module_utils.common.collections import Mapping
from ansible.module_utils.six import PY3, string_types
from ansible.module_utils.six.moves import cStringIO
-from ansible.module_utils.basic import get_distribution
+from ansible.module_utils.basic import get_distribution, missing_required_lib
from ansible.module_utils._text import to_bytes, to_native, to_text
try:
# python3
import urllib.request as urllib_request
- from urllib.request import AbstractHTTPHandler
+ from urllib.request import AbstractHTTPHandler, BaseHandler
except ImportError:
# python2
import urllib2 as urllib_request
- from urllib2 import AbstractHTTPHandler
+ from urllib2 import AbstractHTTPHandler, BaseHandler
urllib_request.HTTPRedirectHandler.http_error_308 = urllib_request.HTTPRedirectHandler.http_error_307
@@ -171,13 +171,105 @@ except ImportError:
except ImportError:
HAS_MATCH_HOSTNAME = False
+HAS_CRYPTOGRAPHY = True
+try:
+ from cryptography import x509
+ from cryptography.hazmat.backends import default_backend
+ from cryptography.hazmat.primitives import hashes
+ from cryptography.exceptions import UnsupportedAlgorithm
+except ImportError:
+ HAS_CRYPTOGRAPHY = False
+# Old import for GSSAPI authentication; it is not used in urls.py but is kept for backwards compatibility.
try:
import urllib_gssapi
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
+GSSAPI_IMP_ERR = None
+try:
+ import gssapi
+
+ class HTTPGSSAPIAuthHandler(BaseHandler):
+ """ Handles Negotiate/Kerberos support through the gssapi library. """
+
+ AUTH_HEADER_PATTERN = re.compile(r'(?:.*)\s*(Negotiate|Kerberos)\s*([^,]*),?', re.I)
+ handler_order = 480 # Handle before Digest authentication
+
+ def __init__(self, username=None, password=None):
+ self.username = username
+ self.password = password
+ self._context = None
+
+ def get_auth_value(self, headers):
+ auth_match = self.AUTH_HEADER_PATTERN.search(headers.get('www-authenticate', ''))
+ if auth_match:
+ return auth_match.group(1), base64.b64decode(auth_match.group(2))
+
+ def http_error_401(self, req, fp, code, msg, headers):
+ # If we've already attempted the auth and we've reached this again then there was a failure.
+ if self._context:
+ return
+
+ parsed = generic_urlparse(urlparse(req.get_full_url()))
+
+ auth_header = self.get_auth_value(headers)
+ if not auth_header:
+ return
+ auth_protocol, in_token = auth_header
+
+ username = None
+ if self.username:
+ username = gssapi.Name(self.username, name_type=gssapi.NameType.user)
+
+ if username and self.password:
+ if not hasattr(gssapi.raw, 'acquire_cred_with_password'):
+ raise NotImplementedError("Platform GSSAPI library does not support "
+ "gss_acquire_cred_with_password, cannot acquire GSSAPI credential with "
+ "explicit username and password.")
+
+ b_password = to_bytes(self.password, errors='surrogate_or_strict')
+ cred = gssapi.raw.acquire_cred_with_password(username, b_password, usage='initiate').creds
+
+ else:
+ cred = gssapi.Credentials(name=username, usage='initiate')
+
+ # Get the peer certificate for the channel binding token if possible (HTTPS). A bug on macOS causes the
+ # authentication to fail when the CBT is present. Just skip that platform.
+ cbt = None
+ cert = getpeercert(fp, True)
+ if cert and platform.system() != 'Darwin':
+ cert_hash = get_channel_binding_cert_hash(cert)
+ if cert_hash:
+ cbt = gssapi.raw.ChannelBindings(application_data=b"tls-server-end-point:" + cert_hash)
+
+ # TODO: We could add another option that is set to include the port in the SPN if desired in the future.
+ target = gssapi.Name("HTTP@%s" % parsed['hostname'], gssapi.NameType.hostbased_service)
+ self._context = gssapi.SecurityContext(usage="initiate", name=target, creds=cred, channel_bindings=cbt)
+
+ resp = None
+ while not self._context.complete:
+ out_token = self._context.step(in_token)
+ if not out_token:
+ break
+
+ auth_header = '%s %s' % (auth_protocol, to_native(base64.b64encode(out_token)))
+ req.add_unredirected_header('Authorization', auth_header)
+ resp = self.parent.open(req)
+
+ # The response could contain a token that the client uses to validate the server
+ auth_header = self.get_auth_value(resp.headers)
+ if not auth_header:
+ break
+ in_token = auth_header[1]
+
+ return resp
+
+except ImportError:
+ GSSAPI_IMP_ERR = traceback.format_exc()
+ HTTPGSSAPIAuthHandler = None
+
if not HAS_MATCH_HOSTNAME:
# The following block of code is under the terms and conditions of the
# Python Software Foundation License
@@ -408,6 +500,13 @@ class NoSSLError(SSLValidationError):
pass
+class MissingModuleError(Exception):
+ """Failed to import 3rd party module required by the caller"""
+ def __init__(self, message, import_traceback):
+ super(MissingModuleError, self).__init__(message)
+ self.import_traceback = import_traceback
+
+
# Some environments (Google Compute Engine's CoreOS deploys) do not compile
# against openssl and thus do not have any HTTPS support.
CustomHTTPSConnection = None
@@ -824,9 +923,7 @@ class SSLValidationHandler(urllib_request.BaseHandler):
to_native(f.read(), errors='surrogate_or_strict')
)
)
- else:
- ca_certs.append(f.read())
- return ca_certs, cadata, paths_checked
+ return self.ca_path, cadata, paths_checked
if not HAS_SSLCONTEXT:
paths_checked.append('/etc/ssl/certs')
@@ -900,6 +997,8 @@ class SSLValidationHandler(urllib_request.BaseHandler):
if HAS_SSLCONTEXT:
default_verify_paths = ssl.get_default_verify_paths()
paths_checked[:0] = [default_verify_paths.capath]
+ else:
+ os.close(tmp_fd)
return (tmp_path, cadata, paths_checked)
@@ -1030,6 +1129,43 @@ def maybe_add_ssl_handler(url, validate_certs, ca_path=None):
return SSLValidationHandler(parsed.hostname, parsed.port or 443, ca_path=ca_path)
+def getpeercert(response, binary_form=False):
+ """ Attempt to get the peer certificate of the response from urlopen. """
+ # The response from urllib2.open() is different across Python 2 and 3
+ if PY3:
+ socket = response.fp.raw._sock
+ else:
+ socket = response.fp._sock.fp._sock
+
+ try:
+ return socket.getpeercert(binary_form)
+ except AttributeError:
+ pass # Not HTTPS
+
+
+def get_channel_binding_cert_hash(certificate_der):
+ """ Gets the channel binding app data for a TLS connection using the peer cert. """
+ if not HAS_CRYPTOGRAPHY:
+ return
+
+ # Logic documented in RFC 5929 section 4 https://tools.ietf.org/html/rfc5929#section-4
+ cert = x509.load_der_x509_certificate(certificate_der, default_backend())
+
+ hash_algorithm = None
+ try:
+ hash_algorithm = cert.signature_hash_algorithm
+ except UnsupportedAlgorithm:
+ pass
+
+ # If the signature hash algorithm is unknown/unsupported or md5/sha1 we must use SHA256.
+ if not hash_algorithm or hash_algorithm.name in ['md5', 'sha1']:
+ hash_algorithm = hashes.SHA256()
+
+ digest = hashes.Hash(hash_algorithm, default_backend())
+ digest.update(certificate_der)
+ return digest.finalize()
+
+
def rfc2822_date_string(timetuple, zone='-0000'):
"""Accepts a timetuple and optional zone which defaults to ``-0000``
and returns a date string as specified by RFC 2822, e.g.:
@@ -1174,15 +1310,13 @@ class Request:
ssl_handler = maybe_add_ssl_handler(url, validate_certs, ca_path=ca_path)
if ssl_handler and not HAS_SSLCONTEXT:
handlers.append(ssl_handler)
- if HAS_GSSAPI and use_gssapi:
- handlers.append(urllib_gssapi.HTTPSPNEGOAuthHandler())
parsed = generic_urlparse(urlparse(url))
if parsed.scheme != 'ftp':
username = url_username
+ password = url_password
if username:
- password = url_password
netloc = parsed.netloc
elif '@' in parsed.netloc:
credentials, netloc = parsed.netloc.split('@', 1)
@@ -1198,7 +1332,15 @@ class Request:
# reconstruct url without credentials
url = urlunparse(parsed_list)
- if username and not force_basic_auth:
+ if use_gssapi:
+ if HTTPGSSAPIAuthHandler:
+ handlers.append(HTTPGSSAPIAuthHandler(username, password))
+ else:
+ imp_err_msg = missing_required_lib('gssapi', reason='for use_gssapi=True',
+ url='https://pypi.org/project/gssapi/')
+ raise MissingModuleError(imp_err_msg, import_traceback=GSSAPI_IMP_ERR)
+
+ elif username and not force_basic_auth:
passman = urllib_request.HTTPPasswordMgrWithDefaultRealm()
# this creates a password manager
@@ -1541,6 +1683,7 @@ def url_argument_spec():
force_basic_auth=dict(type='bool', default=False),
client_cert=dict(type='path'),
client_key=dict(type='path'),
+ use_gssapi=dict(type='bool', default=False),
)
@@ -1601,6 +1744,7 @@ def fetch_url(module, url, data=None, headers=None, method=None,
client_cert = module.params.get('client_cert')
client_key = module.params.get('client_key')
+ use_gssapi = module.params.get('use_gssapi', use_gssapi)
if not isinstance(cookies, cookiejar.CookieJar):
cookies = cookiejar.LWPCookieJar()
@@ -1653,6 +1797,8 @@ def fetch_url(module, url, data=None, headers=None, method=None,
module.fail_json(msg='%s' % to_native(e), **info)
except (ConnectionError, ValueError) as e:
module.fail_json(msg=to_native(e), **info)
+ except MissingModuleError as e:
+ module.fail_json(msg=to_text(e), exception=e.import_traceback)
except urllib_error.HTTPError as e:
try:
body = e.read()
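A hedged sketch of how a module can opt in to the GSSAPI support wired up above: url_argument_spec() now carries use_gssapi (default False) and fetch_url() reads it from module.params, failing through the MissingModuleError path with the missing_required_lib() message when the gssapi library is absent. Everything here other than use_gssapi and the imports shown in the diff is illustrative.

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url, url_argument_spec

def main():
    arg_spec = url_argument_spec()                 # includes use_gssapi after this patch
    arg_spec.update(url=dict(type='str', required=True))
    module = AnsibleModule(argument_spec=arg_spec)

    # fetch_url() picks use_gssapi up from module.params; with the library
    # missing it fails via the MissingModuleError handling added above.
    resp, info = fetch_url(module, module.params['url'])
    module.exit_json(changed=False, status=info.get('status'))

if __name__ == '__main__':
    main()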
diff --git a/lib/ansible/module_utils/yumdnf.py b/lib/ansible/module_utils/yumdnf.py
index 0d715bfc..64076709 100644
--- a/lib/ansible/module_utils/yumdnf.py
+++ b/lib/ansible/module_utils/yumdnf.py
@@ -29,13 +29,13 @@ yumdnf_argument_spec = dict(
conf_file=dict(type='str'),
disable_excludes=dict(type='str', default=None),
disable_gpg_check=dict(type='bool', default=False),
- disable_plugin=dict(type='list', default=[]),
- disablerepo=dict(type='list', default=[]),
+ disable_plugin=dict(type='list', elements='str', default=[]),
+ disablerepo=dict(type='list', elements='str', default=[]),
download_only=dict(type='bool', default=False),
download_dir=dict(type='str', default=None),
- enable_plugin=dict(type='list', default=[]),
- enablerepo=dict(type='list', default=[]),
- exclude=dict(type='list', default=[]),
+ enable_plugin=dict(type='list', elements='str', default=[]),
+ enablerepo=dict(type='list', elements='str', default=[]),
+ exclude=dict(type='list', elements='str', default=[]),
installroot=dict(type='str', default="/"),
install_repoquery=dict(type='bool', default=True),
install_weak_deps=dict(type='bool', default=True),
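The elements='str' additions above tighten list validation; a minimal hedged sketch of what that spec means for a module author (the 'repos' option is invented):

from ansible.module_utils.basic import AnsibleModule

module = AnsibleModule(
    argument_spec=dict(
        repos=dict(type='list', elements='str', default=[]),
    ),
    supports_check_mode=True,
)
# With type='list', a comma-separated string such as "updates,extras" is split
# into ['updates', 'extras']; elements='str' then coerces each item to a string.
module.exit_json(changed=False, repos=module.params['repos'])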
diff --git a/lib/ansible/modules/add_host.py b/lib/ansible/modules/add_host.py
index 3d8f8f74..f48140b8 100644
--- a/lib/ansible/modules/add_host.py
+++ b/lib/ansible/modules/add_host.py
@@ -37,6 +37,7 @@ notes:
- Since Ansible 2.4, the C(inventory_dir) variable is now set to C(None) instead of the 'global inventory source',
because you can now have multiple sources. An example was added that shows how to partially restore the previous behaviour.
- Windows targets are supported by this module.
+- Though this module does not change the remote host, we do provide 'changed' status as it can be useful for those trying to track inventory changes.
seealso:
- module: ansible.builtin.group_by
author:
diff --git a/lib/ansible/modules/apt.py b/lib/ansible/modules/apt.py
index 144a27d2..d6b038ac 100644
--- a/lib/ansible/modules/apt.py
+++ b/lib/ansible/modules/apt.py
@@ -30,12 +30,14 @@ options:
description:
- Indicates the desired package state. C(latest) ensures that the latest version is installed. C(build-dep) ensures the package build dependencies
are installed. C(fixed) attempt to correct a system with broken dependencies in place.
+ type: str
default: present
choices: [ absent, build-dep, latest, present, fixed ]
update_cache:
description:
- Run the equivalent of C(apt-get update) before the operation. Can be run as part of the package installation or as a separate step.
- Default is not to update the cache.
+ aliases: [ update-cache ]
type: bool
update_cache_retries:
description:
@@ -51,8 +53,9 @@ options:
version_added: '2.10'
cache_valid_time:
description:
- - Update the apt cache if its older than the I(cache_valid_time). This option is set in seconds.
+ - Update the apt cache if it is older than the I(cache_valid_time). This option is set in seconds.
- As of Ansible 2.4, if explicitly set, this sets I(update_cache=yes).
+ type: int
default: 0
purge:
description:
@@ -62,11 +65,13 @@ options:
default_release:
description:
- Corresponds to the C(-t) option for I(apt) and sets pin priorities
+ aliases: [ default-release ]
+ type: str
install_recommends:
description:
- Corresponds to the C(--no-install-recommends) option for I(apt). C(yes) installs recommended packages. C(no) does not install
recommended packages. By default, Ansible will use the same defaults as the operating system. Suggested packages are never installed.
- aliases: ['install-recommends']
+ aliases: [ install-recommends ]
type: bool
force:
description:
@@ -82,6 +87,7 @@ options:
description:
- Ignore if packages cannot be authenticated. This is useful for bootstrapping environments that manage their own apt-key setup.
- 'C(allow_unauthenticated) is only supported with state: I(install)/I(present)'
+ aliases: [ allow-unauthenticated ]
type: bool
default: 'no'
version_added: "2.1"
@@ -95,16 +101,19 @@ options:
version_added: "1.1"
choices: [ dist, full, 'no', safe, 'yes' ]
default: 'no'
+ type: str
dpkg_options:
description:
- Add dpkg options to apt command. Defaults to '-o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold"'
- Options should be supplied as comma separated list
default: force-confdef,force-confold
+ type: str
deb:
description:
- Path to a .deb package on the remote machine.
- If :// in the path, ansible will attempt to download deb before installing. (Version added 2.1)
- Requires the C(xz-utils) package to extract the control file of the deb package to install.
+ type: path
required: false
version_added: "1.6"
autoremove:
@@ -124,7 +133,7 @@ options:
description:
- Force the exit code of /usr/sbin/policy-rc.d.
- For example, if I(policy_rc_d=101) the installed package will not trigger a service start.
- - If /usr/sbin/policy-rc.d already exist, it is backed up and restored after the package installation.
+ - If /usr/sbin/policy-rc.d already exists, it is backed up and restored after the package installation.
- If C(null), the /usr/sbin/policy-rc.d isn't created/changed.
type: int
default: null
@@ -135,6 +144,14 @@ options:
type: bool
default: 'no'
version_added: "2.1"
+ fail_on_autoremove:
+ description:
+ - 'Corresponds to the C(--no-remove) option for C(apt).'
+ - 'If C(yes), it is ensured that no packages will be removed or the task will fail.'
+      - 'C(fail_on_autoremove) is supported with all states except C(absent).'
+ type: bool
+ default: 'no'
+ version_added: "2.11"
force_apt_get:
description:
- Force usage of apt-get instead of aptitude
@@ -194,6 +211,12 @@ EXAMPLES = '''
default_release: squeeze-backports
update_cache: yes
+- name: Install zfsutils-linux, ensuring conflicting packages (e.g. zfs-fuse) will not be removed
+ apt:
+ name: zfsutils-linux
+ state: latest
+ fail_on_autoremove: yes
+
- name: Install latest version of "openjdk-6-jdk" ignoring "install-recommends"
apt:
name: openjdk-6-jdk
@@ -298,7 +321,9 @@ import random
import time
from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.respawn import has_respawned, probe_interpreters_for_module, respawn_module
from ansible.module_utils._text import to_bytes, to_native
+from ansible.module_utils.six import PY3
from ansible.module_utils.urls import fetch_file
# APT related constants
@@ -327,18 +352,16 @@ CLEAN_OP_CHANGED_STR = dict(
autoclean='Del ',
)
-HAS_PYTHON_APT = True
+apt = apt_pkg = None # keep pylint happy by declaring unconditionally
+
+HAS_PYTHON_APT = False
try:
import apt
import apt.debfile
import apt_pkg
+ HAS_PYTHON_APT = True
except ImportError:
- HAS_PYTHON_APT = False
-
-if sys.version_info[0] < 3:
- PYTHON_APT = 'python-apt'
-else:
- PYTHON_APT = 'python3-apt'
+ pass
class PolicyRcD(object):
@@ -357,7 +380,7 @@ class PolicyRcD(object):
if self.m.params['policy_rc_d'] is None:
return
- # if the /usr/sbin/policy-rc.d already exist
+ # if the /usr/sbin/policy-rc.d already exists
# we will back it up during package installation
# then restore it
if os.path.exists('/usr/sbin/policy-rc.d'):
@@ -367,21 +390,21 @@ class PolicyRcD(object):
def __enter__(self):
"""
- This method will be call when we enter the context, before we call `apt-get …`
+ This method will be called when we enter the context, before we call `apt-get …`
"""
# if policy_rc_d is null then we don't need to modify policy-rc.d
if self.m.params['policy_rc_d'] is None:
return
- # if the /usr/sbin/policy-rc.d already exist we back it up
+ # if the /usr/sbin/policy-rc.d already exists we back it up
if self.backup_dir:
try:
shutil.move('/usr/sbin/policy-rc.d', self.backup_dir)
except Exception:
self.m.fail_json(msg="Fail to move /usr/sbin/policy-rc.d to %s" % self.backup_dir)
- # we write /usr/sbin/policy-rc.d so it always exit with code policy_rc_d
+ # we write /usr/sbin/policy-rc.d so it always exits with code policy_rc_d
try:
with open('/usr/sbin/policy-rc.d', 'w') as policy_rc_d:
policy_rc_d.write('#!/bin/sh\nexit %d\n' % self.m.params['policy_rc_d'])
@@ -392,7 +415,7 @@ class PolicyRcD(object):
def __exit__(self, type, value, traceback):
"""
- This method will be call when we enter the context, before we call `apt-get …`
+        This method will be called when we leave the context, after `apt-get …` has been called
"""
# if policy_rc_d is null then we don't need to modify policy-rc.d
@@ -405,12 +428,12 @@ class PolicyRcD(object):
try:
shutil.move(os.path.join(self.backup_dir, 'policy-rc.d'),
'/usr/sbin/policy-rc.d')
- os.rmdir(self.tmpdir_name)
+ os.rmdir(self.backup_dir)
except Exception:
self.m.fail_json(msg="Fail to move back %s to /usr/sbin/policy-rc.d"
% os.path.join(self.backup_dir, 'policy-rc.d'))
else:
- # if they wheren't any /usr/sbin/policy-rc.d file before the call to __enter__
+ # if there wasn't a /usr/sbin/policy-rc.d file before the call to __enter__
# we just remove the file
try:
os.remove('/usr/sbin/policy-rc.d')
@@ -624,7 +647,7 @@ def mark_installed_manually(m, packages):
def install(m, pkgspec, cache, upgrade=False, default_release=None,
install_recommends=None, force=False,
dpkg_options=expand_dpkg_options(DPKG_OPTIONS),
- build_dep=False, fixed=False, autoremove=False, only_upgrade=False,
+ build_dep=False, fixed=False, autoremove=False, fail_on_autoremove=False, only_upgrade=False,
allow_unauthenticated=False):
pkg_list = []
packages = ""
@@ -667,6 +690,11 @@ def install(m, pkgspec, cache, upgrade=False, default_release=None,
else:
autoremove = ''
+ if fail_on_autoremove:
+ fail_on_autoremove = '--no-remove'
+ else:
+ fail_on_autoremove = ''
+
if only_upgrade:
only_upgrade = '--only-upgrade'
else:
@@ -678,9 +706,10 @@ def install(m, pkgspec, cache, upgrade=False, default_release=None,
fixed = ''
if build_dep:
- cmd = "%s -y %s %s %s %s %s build-dep %s" % (APT_GET_CMD, dpkg_options, only_upgrade, fixed, force_yes, check_arg, packages)
+ cmd = "%s -y %s %s %s %s %s %s build-dep %s" % (APT_GET_CMD, dpkg_options, only_upgrade, fixed, force_yes, fail_on_autoremove, check_arg, packages)
else:
- cmd = "%s -y %s %s %s %s %s %s install %s" % (APT_GET_CMD, dpkg_options, only_upgrade, fixed, force_yes, autoremove, check_arg, packages)
+ cmd = "%s -y %s %s %s %s %s %s %s install %s" % \
+ (APT_GET_CMD, dpkg_options, only_upgrade, fixed, force_yes, autoremove, fail_on_autoremove, check_arg, packages)
if default_release:
cmd += " -t '%s'" % (default_release,)
@@ -730,7 +759,7 @@ def get_field_of_deb(m, deb_file, field="Version"):
return to_native(stdout).strip('\n')
-def install_deb(m, debs, cache, force, install_recommends, allow_unauthenticated, dpkg_options):
+def install_deb(m, debs, cache, force, fail_on_autoremove, install_recommends, allow_unauthenticated, dpkg_options):
changed = False
deps_to_install = []
pkgs_to_install = []
@@ -772,6 +801,7 @@ def install_deb(m, debs, cache, force, install_recommends, allow_unauthenticated
if deps_to_install:
(success, retvals) = install(m=m, pkgspec=deps_to_install, cache=cache,
install_recommends=install_recommends,
+ fail_on_autoremove=fail_on_autoremove,
allow_unauthenticated=allow_unauthenticated,
dpkg_options=expand_dpkg_options(dpkg_options))
if not success:
@@ -901,7 +931,7 @@ def cleanup(m, purge=False, force=False, operation=None,
def upgrade(m, mode="yes", force=False, default_release=None,
use_apt_get=False,
- dpkg_options=expand_dpkg_options(DPKG_OPTIONS), autoremove=False,
+ dpkg_options=expand_dpkg_options(DPKG_OPTIONS), autoremove=False, fail_on_autoremove=False,
allow_unauthenticated=False,
):
@@ -943,6 +973,11 @@ def upgrade(m, mode="yes", force=False, default_release=None,
else:
force_yes = ''
+ if fail_on_autoremove:
+ fail_on_autoremove = '--no-remove'
+ else:
+ fail_on_autoremove = ''
+
allow_unauthenticated = '--allow-unauthenticated' if allow_unauthenticated else ''
if apt_cmd is None:
@@ -953,8 +988,7 @@ def upgrade(m, mode="yes", force=False, default_release=None,
"to have APTITUDE in path or use 'force_apt_get=True'")
apt_cmd_path = m.get_bin_path(apt_cmd, required=True)
- cmd = '%s -y %s %s %s %s %s' % (apt_cmd_path, dpkg_options, force_yes, allow_unauthenticated,
- check_arg, upgrade_command)
+ cmd = '%s -y %s %s %s %s %s %s' % (apt_cmd_path, dpkg_options, force_yes, fail_on_autoremove, allow_unauthenticated, check_arg, upgrade_command)
if default_release:
cmd += " -t '%s'" % (default_release,)
@@ -1036,10 +1070,11 @@ def main():
default_release=dict(type='str', aliases=['default-release']),
install_recommends=dict(type='bool', aliases=['install-recommends']),
force=dict(type='bool', default=False),
- upgrade=dict(type='str', choices=['dist', 'full', 'no', 'safe', 'yes']),
+ upgrade=dict(type='str', choices=['dist', 'full', 'no', 'safe', 'yes'], default='no'),
dpkg_options=dict(type='str', default=DPKG_OPTIONS),
autoremove=dict(type='bool', default=False),
autoclean=dict(type='bool', default=False),
+ fail_on_autoremove=dict(type='bool', default=False),
policy_rc_d=dict(type='int', default=None),
only_upgrade=dict(type='bool', default=False),
force_apt_get=dict(type='bool', default=False),
@@ -1053,26 +1088,59 @@ def main():
module.run_command_environ_update = APT_ENV_VARS
if not HAS_PYTHON_APT:
+ # This interpreter can't see the apt Python library- we'll do the following to try and fix that:
+ # 1) look in common locations for system-owned interpreters that can see it; if we find one, respawn under it
+ # 2) finding none, try to install a matching python-apt package for the current interpreter version;
+ # we limit to the current interpreter version to try and avoid installing a whole other Python just
+ # for apt support
+ # 3) if we installed a support package, try to respawn under what we think is the right interpreter (could be
+ # the current interpreter again, but we'll let it respawn anyway for simplicity)
+ # 4) if still not working, return an error and give up (some corner cases not covered, but this shouldn't be
+ # made any more complex than it already is to try and cover more, eg, custom interpreters taking over
+ # system locations)
+
+ apt_pkg_name = 'python3-apt' if PY3 else 'python-apt'
+
+ if has_respawned():
+ # this shouldn't be possible; short-circuit early if it happens...
+ module.fail_json(msg="{0} must be installed and visible from {1}.".format(apt_pkg_name, sys.executable))
+
+ interpreters = ['/usr/bin/python3', '/usr/bin/python2', '/usr/bin/python']
+
+ interpreter = probe_interpreters_for_module(interpreters, 'apt')
+
+ if interpreter:
+ # found the Python bindings; respawn this module under the interpreter where we found them
+ respawn_module(interpreter)
+ # this is the end of the line for this process, it will exit here once the respawned module has completed
+
+ # don't make changes if we're in check_mode
if module.check_mode:
module.fail_json(msg="%s must be installed to use check mode. "
- "If run normally this module can auto-install it." % PYTHON_APT)
- try:
- # We skip cache update in auto install the dependency if the
- # user explicitly declared it with update_cache=no.
- if module.params.get('update_cache') is False:
- module.warn("Auto-installing missing dependency without updating cache: %s" % PYTHON_APT)
- else:
- module.warn("Updating cache and auto-installing missing dependency: %s" % PYTHON_APT)
- module.run_command(['apt-get', 'update'], check_rc=True)
-
- module.run_command(['apt-get', 'install', '--no-install-recommends', PYTHON_APT, '-y', '-q'], check_rc=True)
- global apt, apt_pkg
- import apt
- import apt.debfile
- import apt_pkg
- except ImportError:
- module.fail_json(msg="Could not import python modules: apt, apt_pkg. "
- "Please install %s package." % PYTHON_APT)
+ "If run normally this module can auto-install it." % apt_pkg_name)
+
+ # We skip cache update in auto install the dependency if the
+ # user explicitly declared it with update_cache=no.
+ if module.params.get('update_cache') is False:
+ module.warn("Auto-installing missing dependency without updating cache: %s" % apt_pkg_name)
+ else:
+ module.warn("Updating cache and auto-installing missing dependency: %s" % apt_pkg_name)
+ module.run_command(['apt-get', 'update'], check_rc=True)
+
+ # try to install the apt Python binding
+ module.run_command(['apt-get', 'install', '--no-install-recommends', apt_pkg_name, '-y', '-q'], check_rc=True)
+
+ # try again to find the bindings in common places
+ interpreter = probe_interpreters_for_module(interpreters, 'apt')
+
+ if interpreter:
+ # found the Python bindings; respawn this module under the interpreter where we found them
+ # NB: respawn is somewhat wasteful if it's this interpreter, but simplifies the code
+ respawn_module(interpreter)
+ # this is the end of the line for this process, it will exit here once the respawned module has completed
+ else:
+ # we've done all we can do; just tell the user it's busted and get out
+ module.fail_json(msg="{0} must be installed and visible from {1}.".format(apt_pkg_name, sys.executable))
global APTITUDE_CMD
APTITUDE_CMD = module.get_bin_path("aptitude", False)
@@ -1095,6 +1163,7 @@ def main():
allow_unauthenticated = p['allow_unauthenticated']
dpkg_options = expand_dpkg_options(p['dpkg_options'])
autoremove = p['autoremove']
+ fail_on_autoremove = p['fail_on_autoremove']
autoclean = p['autoclean']
# Get the cache object
@@ -1156,7 +1225,7 @@ def main():
force_yes = p['force']
if p['upgrade']:
- upgrade(module, p['upgrade'], force_yes, p['default_release'], use_apt_get, dpkg_options, autoremove, allow_unauthenticated)
+ upgrade(module, p['upgrade'], force_yes, p['default_release'], use_apt_get, dpkg_options, autoremove, fail_on_autoremove, allow_unauthenticated)
if p['deb']:
if p['state'] != 'present':
@@ -1166,7 +1235,7 @@ def main():
install_deb(module, p['deb'], cache,
install_recommends=install_recommends,
allow_unauthenticated=allow_unauthenticated,
- force=force_yes, dpkg_options=p['dpkg_options'])
+ force=force_yes, fail_on_autoremove=fail_on_autoremove, dpkg_options=p['dpkg_options'])
unfiltered_packages = p['package'] or ()
packages = [package.strip() for package in unfiltered_packages if package != '*']
@@ -1176,7 +1245,7 @@ def main():
if latest and all_installed:
if packages:
module.fail_json(msg='unable to install additional packages when upgrading all installed packages')
- upgrade(module, 'yes', force_yes, p['default_release'], use_apt_get, dpkg_options, autoremove, allow_unauthenticated)
+ upgrade(module, 'yes', force_yes, p['default_release'], use_apt_get, dpkg_options, autoremove, fail_on_autoremove, allow_unauthenticated)
if packages:
for package in packages:
@@ -1214,6 +1283,7 @@ def main():
build_dep=state_builddep,
fixed=state_fixed,
autoremove=autoremove,
+ fail_on_autoremove=fail_on_autoremove,
only_upgrade=p['only_upgrade'],
allow_unauthenticated=allow_unauthenticated
)
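The numbered respawn comments above describe a flow that is not apt-specific; below is a hedged, self-contained sketch of the same pattern for an arbitrary system Python binding (the 'selinux' import is only an example binding, not taken from this patch).

import sys

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.respawn import has_respawned, probe_interpreters_for_module, respawn_module

def ensure_binding(module, py_module_name):
    if has_respawned():
        # already respawned once and still cannot see the binding; give up
        module.fail_json(msg="%s must be installed and visible from %s." % (py_module_name, sys.executable))

    interpreters = ['/usr/bin/python3', '/usr/bin/python2', '/usr/bin/python']
    interpreter = probe_interpreters_for_module(interpreters, py_module_name)
    if interpreter:
        respawn_module(interpreter)  # does not return; the module re-executes under that interpreter
    module.fail_json(msg="no system interpreter can import %s" % py_module_name)

module = AnsibleModule(argument_spec={})
try:
    import selinux  # noqa: F401  -- example binding only
except ImportError:
    ensure_binding(module, 'selinux')
module.exit_json(changed=False)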
diff --git a/lib/ansible/modules/apt_key.py b/lib/ansible/modules/apt_key.py
index 10570813..4a8e968c 100644
--- a/lib/ansible/modules/apt_key.py
+++ b/lib/ansible/modules/apt_key.py
@@ -20,7 +20,8 @@ short_description: Add or remove an apt key
description:
- Add or remove an I(apt) key, optionally downloading it.
notes:
- - Doesn't download the key unless it really needs it.
+ - The apt-key command has been deprecated and suggests to 'manage keyring files in trusted.gpg.d instead'. See the Debian wiki for details.
+      This module is kept for backwards compatibility for systems that still use apt-key as the main way to manage apt repository keys.
- As a sanity check, downloaded key id must match the one specified.
- "Use full fingerprint (40 characters) key ids to avoid key collisions.
To generate a full-fingerprint imported key: C(apt-key adv --list-public-keys --with-fingerprint --with-colons)."
@@ -36,26 +37,33 @@ options:
- Including this allows check mode to correctly report the changed state.
- If specifying a subkey's id be aware that apt-key does not understand how to remove keys via a subkey id. Specify the primary key's id instead.
- This parameter is required when C(state) is set to C(absent).
+ type: str
data:
description:
- The keyfile contents to add to the keyring.
+ type: str
file:
description:
- The path to a keyfile on the remote server to add to the keyring.
+ type: path
keyring:
description:
- The full path to specific keyring file in C(/etc/apt/trusted.gpg.d/).
+ type: path
version_added: "1.3"
url:
description:
- The URL to retrieve key from.
+ type: str
keyserver:
description:
- The keyserver to retrieve key from.
+ type: str
version_added: "1.6"
state:
description:
- Ensures that the key is present (added) or absent (revoked).
+ type: str
choices: [ absent, present ]
default: present
validate_certs:
@@ -107,7 +115,40 @@ EXAMPLES = '''
state: present
'''
-RETURN = '''#'''
+RETURN = '''
+after:
+ description: List of apt key ids or fingerprints after any modification
+ returned: on change
+ type: list
+ sample: ["D8576A8BA88D21E9", "3B4FE6ACC0B21F32", "D94AA3F0EFE21092", "871920D1991BC93C"]
+before:
+    description: List of apt key ids or fingerprints before any modifications
+ returned: always
+ type: list
+ sample: ["3B4FE6ACC0B21F32", "D94AA3F0EFE21092", "871920D1991BC93C"]
+fp:
+ description: Fingerprint of the key to import
+ returned: always
+ type: str
+ sample: "D8576A8BA88D21E9"
+id:
+ description: key id from source
+ returned: always
+ type: str
+ sample: "36A1D7869245C8950F966E92D8576A8BA88D21E9"
+key_id:
+    description: calculated key id; it should be the same as 'id', but can be different
+ returned: always
+ type: str
+ sample: "36A1D7869245C8950F966E92D8576A8BA88D21E9"
+short_id:
+    description: calculated short key id
+ returned: always
+ type: str
+ sample: "A88D21E9"
+'''
+
+import os
# FIXME: standardize into module_common
from traceback import format_exc
@@ -118,18 +159,28 @@ from ansible.module_utils.urls import fetch_url
apt_key_bin = None
+gpg_bin = None
+lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
def find_needed_binaries(module):
global apt_key_bin
-
+ global gpg_bin
apt_key_bin = module.get_bin_path('apt-key', required=True)
+ gpg_bin = module.get_bin_path('gpg', required=True)
+
+
+def add_http_proxy(cmd):
- # FIXME: Is there a reason that gpg and grep are checked? Is it just
- # cruft or does the apt .deb package not require them (and if they're not
- # installed, /usr/bin/apt-key fails?)
- module.get_bin_path('gpg', required=True)
- module.get_bin_path('grep', required=True)
+ for envvar in ('HTTPS_PROXY', 'https_proxy', 'HTTP_PROXY', 'http_proxy'):
+ proxy = os.environ.get(envvar)
+ if proxy:
+ break
+
+ if proxy:
+ cmd += ' --keyserver-options http-proxy=%s' % proxy
+
+ return cmd
def parse_key_id(key_id):
@@ -150,7 +201,7 @@ def parse_key_id(key_id):
"""
# Make sure the key_id is valid hexadecimal
- int(key_id, 16)
+ int(to_native(key_id), 16)
key_id = key_id.upper()
if key_id.startswith('0X'):
@@ -169,23 +220,41 @@ def parse_key_id(key_id):
return short_key_id, fingerprint, key_id
+def parse_output_for_keys(output, short_format=False):
+
+ found = []
+ lines = to_native(output).split('\n')
+ for line in lines:
+ if (line.startswith("pub") or line.startswith("sub")) and "expired" not in line:
+ try:
+ # apt key format
+ tokens = line.split()
+ code = tokens[1]
+ (len_type, real_code) = code.split("/")
+ except (IndexError, ValueError):
+ # gpg format
+ try:
+ tokens = line.split(':')
+ real_code = tokens[4]
+ except (IndexError, ValueError):
+ # invalid line, skip
+ continue
+ found.append(real_code)
+
+ if found and short_format:
+ found = shorten_key_ids(found)
+
+ return found
+
+
def all_keys(module, keyring, short_format):
- if keyring:
+ if keyring is not None:
cmd = "%s --keyring %s adv --list-public-keys --keyid-format=long" % (apt_key_bin, keyring)
else:
cmd = "%s adv --list-public-keys --keyid-format=long" % apt_key_bin
(rc, out, err) = module.run_command(cmd)
- results = []
- lines = to_native(out).split('\n')
- for line in lines:
- if (line.startswith("pub") or line.startswith("sub")) and "expired" not in line:
- tokens = line.split()
- code = tokens[1]
- (len_type, real_code) = code.split("/")
- results.append(real_code)
- if short_format:
- results = shorten_key_ids(results)
- return results
+
+ return parse_output_for_keys(out, short_format)
def shorten_key_ids(key_id_list):
@@ -200,13 +269,10 @@ def shorten_key_ids(key_id_list):
def download_key(module, url):
- # FIXME: move get_url code to common, allow for in-memory D/L, support proxies
- # and reuse here
- if url is None:
- module.fail_json(msg="needed a URL but was not specified")
try:
- rsp, info = fetch_url(module, url)
+ # note: validate_certs and other args are pulled from module directly
+ rsp, info = fetch_url(module, url, use_proxy=True)
if info['status'] != 200:
module.fail_json(msg="Failed to download key at %s: %s" % (url, info['msg']))
@@ -215,13 +281,44 @@ def download_key(module, url):
module.fail_json(msg="error getting key id from url: %s" % url, traceback=format_exc())
+def get_key_id_from_file(module, filename, data=None):
+
+ native_data = to_native(data)
+ is_armored = native_data.find("-----BEGIN PGP PUBLIC KEY BLOCK-----") >= 0
+
+ global lang_env
+ key = None
+
+ cmd = [gpg_bin, '--with-colons', filename]
+
+ (rc, out, err) = module.run_command(cmd, environ_update=lang_env, data=(native_data if is_armored else data), binary_data=not is_armored)
+ if rc != 0:
+ module.fail_json(msg="Unable to extract key from '%s'" % ('inline data' if data is not None else filename), stdout=out, stderr=err)
+
+ keys = parse_output_for_keys(out)
+ # assume we only want first key?
+ if keys:
+ key = keys[0]
+
+ return key
+
+
+def get_key_id_from_data(module, data):
+ return get_key_id_from_file(module, '-', data)
+
+
def import_key(module, keyring, keyserver, key_id):
+
+ global lang_env
if keyring:
cmd = "%s --keyring %s adv --no-tty --keyserver %s --recv %s" % (apt_key_bin, keyring, keyserver, key_id)
else:
cmd = "%s adv --no-tty --keyserver %s --recv %s" % (apt_key_bin, keyserver, key_id)
+
+ # check for proxy
+ cmd = add_http_proxy(cmd)
+
for retry in range(5):
- lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
(rc, out, err) = module.run_command(cmd, environ_update=lang_env)
if rc == 0:
break
@@ -269,16 +366,17 @@ def main():
url=dict(type='str'),
data=dict(type='str'),
file=dict(type='path'),
- key=dict(type='str'),
+ key=dict(type='str', removed_in_version='2.14', removed_from_collection='ansible.builtin', no_log=False),
keyring=dict(type='path'),
validate_certs=dict(type='bool', default=True),
keyserver=dict(type='str'),
state=dict(type='str', default='present', choices=['absent', 'present']),
),
supports_check_mode=True,
- mutually_exclusive=(('data', 'filename', 'keyserver', 'url'),),
+ mutually_exclusive=(('data', 'file', 'keyserver', 'url'),),
)
+ # parameters
key_id = module.params['id']
url = module.params['url']
data = module.params['data']
@@ -286,72 +384,91 @@ def main():
keyring = module.params['keyring']
state = module.params['state']
keyserver = module.params['keyserver']
- changed = False
- fingerprint = short_key_id = key_id
+ # internal vars
short_format = False
- if key_id:
- try:
- short_key_id, fingerprint, key_id = parse_key_id(key_id)
- except ValueError:
- module.fail_json(msg='Invalid key_id', id=key_id)
-
- if len(fingerprint) == 8:
- short_format = True
+ short_key_id = None
+ fingerprint = None
+ error_no_error = "apt-key did not return an error, but %s (check that the id is correct and *not* a subkey)"
+ # ensure we have requirements met
find_needed_binaries(module)
- keys = all_keys(module, keyring, short_format)
- return_values = {}
+ # initialize result dict
+ r = {'changed': False}
- if state == 'present':
- if fingerprint and fingerprint in keys:
- module.exit_json(changed=False)
- elif fingerprint and fingerprint not in keys and module.check_mode:
- # TODO: Someday we could go further -- write keys out to
- # a temporary file and then extract the key id from there via gpg
- # to decide if the key is installed or not.
- module.exit_json(changed=True)
- else:
- if not filename and not data and not keyserver:
- data = download_key(module, url)
+ if not key_id:
+
+ if keyserver:
+ module.fail_json(msg="Missing key_id, required with keyserver.")
+
+ if url:
+ data = download_key(module, url)
- if filename:
- add_key(module, filename, keyring)
- elif keyserver:
- import_key(module, keyring, keyserver, key_id)
- else:
- add_key(module, "-", keyring, data)
+ if filename:
+ key_id = get_key_id_from_file(module, filename)
+ elif data:
+ key_id = get_key_id_from_data(module, data)
- changed = False
- keys2 = all_keys(module, keyring, short_format)
- if len(keys) != len(keys2):
- changed = True
+ r['id'] = key_id
+ try:
+ short_key_id, fingerprint, key_id = parse_key_id(key_id)
+ r['short_id'] = short_key_id
+ r['fp'] = fingerprint
+ r['key_id'] = key_id
+ except ValueError:
+ module.fail_json(msg='Invalid key_id', **r)
+
+ if not fingerprint:
+ # invalid key should fail well before this point, but JIC ...
+ module.fail_json(msg="Unable to continue as we could not extract a valid fingerprint to compare against existing keys.", **r)
+
+ if len(key_id) == 8:
+ short_format = True
- if fingerprint and fingerprint not in keys2:
- module.fail_json(msg="key does not seem to have been added", id=key_id)
- module.exit_json(changed=changed)
+ # get existing keys to verify if we need to change
+ r['before'] = keys = all_keys(module, keyring, short_format)
+ keys2 = []
+
+ if state == 'present':
+ if (short_format and short_key_id not in keys) or (not short_format and fingerprint not in keys):
+ r['changed'] = True
+ if not module.check_mode:
+ if filename:
+ add_key(module, filename, keyring)
+ elif keyserver:
+ import_key(module, keyring, keyserver, key_id)
+ elif data:
+ # this also takes care of url if key_id was not provided
+ add_key(module, "-", keyring, data)
+ elif url:
+ # we hit this branch only if key_id is supplied with url
+ data = download_key(module, url)
+ add_key(module, "-", keyring, data)
+ else:
+ module.fail_json(msg="No key to add ... how did i get here?!?!", **r)
+
+ # verify it got added
+ r['after'] = keys2 = all_keys(module, keyring, short_format)
+ if (short_format and short_key_id not in keys2) or (not short_format and fingerprint not in keys2):
+ module.fail_json(msg=error_no_error % 'failed to add the key', **r)
elif state == 'absent':
if not key_id:
- module.fail_json(msg="key is required")
+ module.fail_json(msg="key is required to remove a key", **r)
if fingerprint in keys:
- if module.check_mode:
- module.exit_json(changed=True)
-
- # we use the "short" id: key_id[-8:], short_format=True
- # it's a workaround for https://bugs.launchpad.net/ubuntu/+source/apt/+bug/1481871
- if remove_key(module, short_key_id, keyring):
- keys = all_keys(module, keyring, short_format)
- if fingerprint in keys:
- module.fail_json(msg="apt-key del did not return an error but the key was not removed (check that the id is correct and *not* a subkey)",
- id=key_id)
- changed = True
- else:
- # FIXME: module.fail_json or exit-json immediately at point of failure
- module.fail_json(msg="error removing key_id", **return_values)
-
- module.exit_json(changed=changed, **return_values)
+ r['changed'] = True
+ if not module.check_mode:
+ # we use the "short" id: key_id[-8:], short_format=True
+ # it's a workaround for https://bugs.launchpad.net/ubuntu/+source/apt/+bug/1481871
+ if short_key_id is not None and remove_key(module, short_key_id, keyring):
+ r['after'] = keys2 = all_keys(module, keyring, short_format)
+ if fingerprint in keys2:
+ module.fail_json(msg=error_no_error % 'the key was not removed', **r)
+ else:
+ module.fail_json(msg="error removing key_id", **r)
+
+ module.exit_json(**r)
if __name__ == '__main__':
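parse_output_for_keys() above falls back to gpg's machine-readable colon format when the apt-key layout does not match; here is a small hedged sketch of that fallback in isolation (the sample line is representative, not captured from a real run).

def key_ids_from_colon_output(output):
    # field 5 (index 4) of a pub/sub record in `gpg --with-colons` output is the long key id
    found = []
    for line in output.splitlines():
        if line.startswith('pub') or line.startswith('sub'):
            fields = line.split(':')
            if len(fields) > 4 and fields[4]:
                found.append(fields[4])
    return found

sample = 'pub:-:4096:1:D8576A8BA88D21E9:1531232580:::-:::scESC::::::23::0:'
print(key_ids_from_colon_output(sample))   # ['D8576A8BA88D21E9']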
diff --git a/lib/ansible/modules/apt_repository.py b/lib/ansible/modules/apt_repository.py
index de39424e..5331e89a 100644
--- a/lib/ansible/modules/apt_repository.py
+++ b/lib/ansible/modules/apt_repository.py
@@ -25,22 +25,26 @@ options:
repo:
description:
- A source string for the repository.
+ type: str
required: true
state:
description:
- A source string state.
+ type: str
choices: [ absent, present ]
default: "present"
mode:
description:
- - The octal mode for newly created files in sources.list.d
- default: '0644'
+ - The octal mode for newly created files in sources.list.d.
+      - Default is what the system uses (probably 0644).
+ type: raw
version_added: "1.6"
update_cache:
description:
- Run the equivalent of C(apt-get update) when a change occurs. Cache updates are run after making changes.
type: bool
default: "yes"
+ aliases: [ update-cache ]
update_cache_retries:
description:
- Amount of retries if the cache update fails. Also see I(update_cache_retry_max_delay).
@@ -65,13 +69,25 @@ options:
- Sets the name of the source list file in sources.list.d.
Defaults to a file name based on the repository source url.
The .list extension will be automatically added.
+ type: str
version_added: '2.1'
codename:
description:
- Override the distribution codename to use for PPA repositories.
Should usually only be set when working with a PPA on
a non-Ubuntu target (for example, Debian or Mint).
+ type: str
version_added: '2.3'
+ install_python_apt:
+ description:
+ - Whether to automatically try to install the Python apt library or not, if it is not already installed.
+ Without this library, the module does not work.
+ - Runs C(apt-get install python-apt) for Python 2, and C(apt-get install python3-apt) for Python 3.
+ - Only works with the system Python 2 or Python 3. If you are using a Python on the remote that is not
+ the system Python, set I(install_python_apt=false) and ensure that the Python apt library
+ for your Python version is installed some other way.
+ type: bool
+ default: true
author:
- Alexander Saltanov (@sashka)
version_added: "0.7"
@@ -124,51 +140,44 @@ import copy
import random
import time
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.respawn import has_respawned, probe_interpreters_for_module, respawn_module
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six import PY3
+from ansible.module_utils.urls import fetch_url
+
+# init module names to keep pylint happy
+apt = apt_pkg = aptsources_distro = distro = None
+
try:
import apt
import apt_pkg
import aptsources.distro as aptsources_distro
+
distro = aptsources_distro.get_distro()
+
HAVE_PYTHON_APT = True
except ImportError:
- distro = None
HAVE_PYTHON_APT = False
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils._text import to_native
-from ansible.module_utils.urls import fetch_url
-
-
-if sys.version_info[0] < 3:
- PYTHON_APT = 'python-apt'
-else:
- PYTHON_APT = 'python3-apt'
-
DEFAULT_SOURCES_PERM = 0o0644
VALID_SOURCE_TYPES = ('deb', 'deb-src')
-def install_python_apt(module):
+def install_python_apt(module, apt_pkg_name):
if not module.check_mode:
apt_get_path = module.get_bin_path('apt-get')
if apt_get_path:
rc, so, se = module.run_command([apt_get_path, 'update'])
if rc != 0:
- module.fail_json(msg="Failed to auto-install %s. Error was: '%s'" % (PYTHON_APT, se.strip()))
- rc, so, se = module.run_command([apt_get_path, 'install', PYTHON_APT, '-y', '-q'])
- if rc == 0:
- global apt, apt_pkg, aptsources_distro, distro, HAVE_PYTHON_APT
- import apt
- import apt_pkg
- import aptsources.distro as aptsources_distro
- distro = aptsources_distro.get_distro()
- HAVE_PYTHON_APT = True
- else:
- module.fail_json(msg="Failed to auto-install %s. Error was: '%s'" % (PYTHON_APT, se.strip()))
+ module.fail_json(msg="Failed to auto-install %s. Error was: '%s'" % (apt_pkg_name, se.strip()))
+ rc, so, se = module.run_command([apt_get_path, 'install', apt_pkg_name, '-y', '-q'])
+ if rc != 0:
+ module.fail_json(msg="Failed to auto-install %s. Error was: '%s'" % (apt_pkg_name, se.strip()))
else:
- module.fail_json(msg="%s must be installed to use check mode" % PYTHON_APT)
+ module.fail_json(msg="%s must be installed to use check mode" % apt_pkg_name)
class InvalidSource(Exception):
@@ -410,6 +419,12 @@ class UbuntuSourcesList(SourcesList):
self.codename = module.params['codename'] or distro.codename
super(UbuntuSourcesList, self).__init__(module)
+ def __deepcopy__(self, memo=None):
+ return UbuntuSourcesList(
+ self.module,
+ add_ppa_signing_keys_callback=self.add_ppa_signing_keys_callback
+ )
+
def _get_ppa_info(self, owner_name, ppa_name):
lp_api = self.LP_API % (owner_name, ppa_name)
@@ -530,10 +545,53 @@ def main():
sourceslist = None
if not HAVE_PYTHON_APT:
+ # This interpreter can't see the apt Python library- we'll do the following to try and fix that:
+ # 1) look in common locations for system-owned interpreters that can see it; if we find one, respawn under it
+ # 2) finding none, try to install a matching python-apt package for the current interpreter version;
+ # we limit to the current interpreter version to try and avoid installing a whole other Python just
+ # for apt support
+ # 3) if we installed a support package, try to respawn under what we think is the right interpreter (could be
+ # the current interpreter again, but we'll let it respawn anyway for simplicity)
+ # 4) if still not working, return an error and give up (some corner cases not covered, but this shouldn't be
+ # made any more complex than it already is to try and cover more, eg, custom interpreters taking over
+ # system locations)
+
+ apt_pkg_name = 'python3-apt' if PY3 else 'python-apt'
+
+ if has_respawned():
+ # this shouldn't be possible; short-circuit early if it happens...
+ module.fail_json(msg="{0} must be installed and visible from {1}.".format(apt_pkg_name, sys.executable))
+
+ interpreters = ['/usr/bin/python3', '/usr/bin/python2', '/usr/bin/python']
+
+ interpreter = probe_interpreters_for_module(interpreters, 'apt')
+
+ if interpreter:
+ # found the Python bindings; respawn this module under the interpreter where we found them
+ respawn_module(interpreter)
+ # this is the end of the line for this process, it will exit here once the respawned module has completed
+
+ # don't make changes if we're in check_mode
+ if module.check_mode:
+ module.fail_json(msg="%s must be installed to use check mode. "
+ "If run normally this module can auto-install it." % apt_pkg_name)
+
if params['install_python_apt']:
- install_python_apt(module)
+ install_python_apt(module, apt_pkg_name)
+ else:
+ module.fail_json(msg='%s is not installed, and install_python_apt is False' % apt_pkg_name)
+
+ # try again to find the bindings in common places
+ interpreter = probe_interpreters_for_module(interpreters, 'apt')
+
+ if interpreter:
+ # found the Python bindings; respawn this module under the interpreter where we found them
+ # NB: respawn is somewhat wasteful if it's this interpreter, but simplifies the code
+ respawn_module(interpreter)
+ # this is the end of the line for this process, it will exit here once the respawned module has completed
else:
- module.fail_json(msg='%s is not installed, and install_python_apt is False' % PYTHON_APT)
+ # we've done all we can do; just tell the user it's busted and get out
+ module.fail_json(msg="{0} must be installed and visible from {1}.".format(apt_pkg_name, sys.executable))
if not repo:
module.fail_json(msg='Please set argument \'repo\' to a non-empty value')
diff --git a/lib/ansible/modules/async_wrapper.py b/lib/ansible/modules/async_wrapper.py
index 5379726a..7ba8271e 100644
--- a/lib/ansible/modules/async_wrapper.py
+++ b/lib/ansible/modules/async_wrapper.py
@@ -74,7 +74,7 @@ def _filter_non_json_lines(data):
Used to filter unrelated output around module JSON output, like messages from
tcagetattr, or where dropbear spews MOTD on every single command (which is nuts).
- Filters leading lines before first line-starting occurrence of '{' or '[', and filter all
+ Filters leading lines before first line-starting occurrence of '{', and filter all
trailing lines after matching close character (working from the bottom of output).
'''
warnings = []
@@ -85,10 +85,6 @@ def _filter_non_json_lines(data):
for start, line in enumerate(lines):
line = line.strip()
if line.startswith(u'{'):
- endchar = u'}'
- break
- elif line.startswith(u'['):
- endchar = u']'
break
else:
raise ValueError('No start of json char found')
@@ -97,7 +93,7 @@ def _filter_non_json_lines(data):
lines = lines[start:]
for reverse_end_offset, line in enumerate(reversed(lines)):
- if line.strip().endswith(endchar):
+ if line.strip().endswith(u'}'):
break
else:
raise ValueError('No end of json char found')
diff --git a/lib/ansible/modules/blockinfile.py b/lib/ansible/modules/blockinfile.py
index 2f80a65e..fde44b5d 100644
--- a/lib/ansible/modules/blockinfile.py
+++ b/lib/ansible/modules/blockinfile.py
@@ -328,9 +328,10 @@ def main():
msg = 'Block inserted'
changed = True
+ backup_file = None
if changed and not module.check_mode:
if module.boolean(params['backup']) and path_exists:
- module.backup_local(path)
+ backup_file = module.backup_local(path)
# We should always follow symlinks so that we change the real file
real_path = os.path.realpath(params['path'])
write_changes(module, result, real_path)
@@ -345,7 +346,11 @@ def main():
attr_diff['after_header'] = '%s (file attributes)' % path
difflist = [diff, attr_diff]
- module.exit_json(changed=changed, msg=msg, diff=difflist)
+
+ if backup_file is None:
+ module.exit_json(changed=changed, msg=msg, diff=difflist)
+ else:
+ module.exit_json(changed=changed, msg=msg, diff=difflist, backup_file=backup_file)
if __name__ == '__main__':
diff --git a/lib/ansible/modules/command.py b/lib/ansible/modules/command.py
index 16805432..1ccd3388 100644
--- a/lib/ansible/modules/command.py
+++ b/lib/ansible/modules/command.py
@@ -37,6 +37,7 @@ options:
- The command to run.
argv:
type: list
+ elements: str
description:
- Passes the command as a list rather than a string.
- Use C(argv) to avoid quoting values that would otherwise be interpreted incorrectly (for example "user name").
@@ -46,10 +47,12 @@ options:
type: path
description:
- A filename or (since 2.0) glob pattern. If a matching file already exists, this step B(will not) be run.
+ - This is checked before I(removes) is checked.
removes:
type: path
description:
- A filename or (since 2.0) glob pattern. If a matching file exists, this step B(will) be run.
+ - This is checked after I(creates) is checked.
version_added: "0.8"
chdir:
type: path
@@ -58,13 +61,16 @@ options:
version_added: "0.6"
warn:
description:
- - Enable or disable task warnings.
+ - (deprecated) Enable or disable task warnings.
+ - This feature is deprecated and will be removed in 2.14.
+ - As of version 2.11, this option is now disabled by default.
type: bool
- default: yes
+ default: no
version_added: "1.8"
stdin:
description:
- Set the stdin of the command directly to the specified value.
+ type: str
version_added: "2.4"
stdin_add_newline:
type: bool
@@ -216,19 +222,19 @@ def check_command(module, commandline):
'rmdir': 'state=absent', 'rm': 'state=absent', 'touch': 'state=touch'}
commands = {'curl': 'get_url or uri', 'wget': 'get_url or uri',
'svn': 'subversion', 'service': 'service',
- 'yum': 'yum', 'apt-get': 'apt',
+ 'mount': 'mount', 'rpm': 'yum, dnf or zypper', 'yum': 'yum', 'apt-get': 'apt',
'tar': 'unarchive', 'unzip': 'unarchive', 'sed': 'replace, lineinfile or template',
- 'dnf': 'dnf'}
- become = ['sudo', 'su', 'runas']
+ 'dnf': 'dnf', 'zypper': 'zypper'}
+ become = ['sudo', 'su', 'pbrun', 'pfexec', 'runas', 'pmrun', 'machinectl']
if isinstance(commandline, list):
command = commandline[0]
else:
command = commandline.split()[0]
command = os.path.basename(command)
- disable_suffix = "If you need to use command because {mod} is insufficient you can add" \
+ disable_suffix = "If you need to use '{cmd}' because the {mod} module is insufficient you can add" \
" 'warn: false' to this command task or set 'command_warnings=False' in" \
- " ansible.cfg to get rid of this message."
+ " the defaults section of ansible.cfg to get rid of this message."
substitutions = {'mod': None, 'cmd': command}
if command in arguments:
@@ -250,17 +256,18 @@ def main():
# the command module is the one ansible module that does not take key=value args
# hence don't copy this one if you are looking to build others!
+ # NOTE: ensure splitter.py is kept in sync for exceptions
module = AnsibleModule(
argument_spec=dict(
_raw_params=dict(),
_uses_shell=dict(type='bool', default=False),
- argv=dict(type='list'),
+ argv=dict(type='list', elements='str'),
chdir=dict(type='path'),
executable=dict(),
creates=dict(type='path'),
removes=dict(type='path'),
# The default for this really comes from the action plugin
- warn=dict(type='bool', default=True),
+ warn=dict(type='bool', default=False, removed_in_version='2.14', removed_from_collection='ansible.builtin'),
stdin=dict(required=False),
stdin_add_newline=dict(type='bool', default=True),
strip_empty_ends=dict(type='bool', default=True),
@@ -279,95 +286,103 @@ def main():
stdin_add_newline = module.params['stdin_add_newline']
strip = module.params['strip_empty_ends']
+    # we promised these in 'always' (_lines get auto-added by the action plugin)
+ r = {'changed': False, 'stdout': '', 'stderr': '', 'rc': None, 'cmd': None, 'start': None, 'end': None, 'delta': None, 'msg': ''}
+
if not shell and executable:
module.warn("As of Ansible 2.4, the parameter 'executable' is no longer supported with the 'command' module. Not using '%s'." % executable)
executable = None
if (not args or args.strip() == '') and not argv:
- module.fail_json(rc=256, msg="no command given")
+ r['rc'] = 256
+ r['msg'] = "no command given"
+ module.fail_json(**r)
if args and argv:
- module.fail_json(rc=256, msg="only command or argv can be given, not both")
+ r['rc'] = 256
+ r['msg'] = "only command or argv can be given, not both"
+ module.fail_json(**r)
if not shell and args:
args = shlex.split(args)
args = args or argv
-
# All args must be strings
if is_iterable(args, include_strings=False):
args = [to_native(arg, errors='surrogate_or_strict', nonstring='simplerepr') for arg in args]
+ r['cmd'] = args
+ if warn:
+        # nanny telling you to use a module instead!
+ check_command(module, args)
+
if chdir:
try:
- chdir = to_bytes(os.path.abspath(chdir), errors='surrogate_or_strict')
+ chdir = to_bytes(chdir, errors='surrogate_or_strict')
except ValueError as e:
- module.fail_json(msg='Unable to use supplied chdir: %s' % to_text(e))
+ r['msg'] = 'Unable to use supplied chdir from %s: %s ' % (os.getcwd(), to_text(e))
+ module.fail_json(**r)
try:
os.chdir(chdir)
except (IOError, OSError) as e:
- module.fail_json(msg='Unable to change directory before execution: %s' % to_text(e))
+ r['msg'] = 'Unable to change directory before execution: %s' % to_text(e)
+ module.fail_json(**r)
+
+ # check_mode partial support, since it only really works in checking creates/removes
+ if module.check_mode:
+ shoulda = "Would"
+ else:
+ shoulda = "Did"
+ # special skips for idempotence if file exists (assumes command creates)
if creates:
- # do not run the command if the line contains creates=filename
- # and the filename already exists. This allows idempotence
- # of command executions.
if glob.glob(creates):
- module.exit_json(
- cmd=args,
- stdout="skipped, since %s exists" % creates,
- changed=False,
- rc=0
- )
-
- if removes:
- # do not run the command if the line contains removes=filename
- # and the filename does not exist. This allows idempotence
- # of command executions.
- if not glob.glob(removes):
- module.exit_json(
- cmd=args,
- stdout="skipped, since %s does not exist" % removes,
- changed=False,
- rc=0
- )
+ r['msg'] = "%s not run command since '%s' exists" % (shoulda, creates)
+ r['stdout'] = "skipped, since %s exists" % creates # TODO: deprecate
- if warn:
- check_command(module, args)
+ r['rc'] = 0
- startd = datetime.datetime.now()
+ # special skips for idempotence if file does not exist (assumes command removes)
+ if not r['msg'] and removes:
+ if not glob.glob(removes):
+ r['msg'] = "%s not run command since '%s' does not exist" % (shoulda, removes)
+ r['stdout'] = "skipped, since %s does not exist" % removes # TODO: deprecate
+ r['rc'] = 0
+
+ if r['msg']:
+ module.exit_json(**r)
+ # actually executes command (or not ...)
if not module.check_mode:
- rc, out, err = module.run_command(args, executable=executable, use_unsafe_shell=shell, encoding=None, data=stdin, binary_data=(not stdin_add_newline))
- elif creates or removes:
- rc = 0
- out = err = b'Command would have run if not in check mode'
+ r['start'] = datetime.datetime.now()
+ r['rc'], r['stdout'], r['stderr'] = module.run_command(args, executable=executable, use_unsafe_shell=shell, encoding=None,
+ data=stdin, binary_data=(not stdin_add_newline))
+ r['end'] = datetime.datetime.now()
else:
- module.exit_json(msg="skipped, running in check mode", skipped=True)
+ # this is partial check_mode support, since we end up skipping if we get here
+ r['rc'] = 0
+ r['msg'] = "Command would have run if not in check mode"
+ r['skipped'] = True
- endd = datetime.datetime.now()
- delta = endd - startd
+ r['changed'] = True
+
+ # convert to text for jsonization and usability
+ if r['start'] is not None and r['end'] is not None:
+ # these are datetime objects, but need them as strings to pass back
+ r['delta'] = to_text(r['end'] - r['start'])
+ r['end'] = to_text(r['end'])
+ r['start'] = to_text(r['start'])
if strip:
- out = out.rstrip(b"\r\n")
- err = err.rstrip(b"\r\n")
-
- result = dict(
- cmd=args,
- stdout=out,
- stderr=err,
- rc=rc,
- start=str(startd),
- end=str(endd),
- delta=str(delta),
- changed=True,
- )
+ r['stdout'] = to_text(r['stdout']).rstrip("\r\n")
+ r['stderr'] = to_text(r['stderr']).rstrip("\r\n")
- if rc != 0:
- module.fail_json(msg='non-zero return code', **result)
+ if r['rc'] != 0:
+ r['msg'] = 'non-zero return code'
+ module.fail_json(**r)
- module.exit_json(**result)
+ module.exit_json(**r)
if __name__ == '__main__':
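The creates/removes short-circuit now just fills the shared result dict instead of calling exit_json() in several places; the decision itself is small enough to sketch (idempotence_skip() is an invented name, the messages mirror the hunk):

    import glob

    def idempotence_skip(creates, removes, check_mode):
        # returns a skip message, or '' if the command should actually run
        shoulda = 'Would' if check_mode else 'Did'
        if creates and glob.glob(creates):
            return "%s not run command since '%s' exists" % (shoulda, creates)
        if removes and not glob.glob(removes):
            return "%s not run command since '%s' does not exist" % (shoulda, removes)
        return ''

A non-empty message is reported with rc=0 and changed=False, matching the hunk.
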
diff --git a/lib/ansible/modules/copy.py b/lib/ansible/modules/copy.py
index 3ec4e446..993143f4 100644
--- a/lib/ansible/modules/copy.py
+++ b/lib/ansible/modules/copy.py
@@ -68,14 +68,18 @@ options:
- The permissions of the destination file or directory.
- For those used to C(/usr/bin/chmod) remember that modes are actually octal numbers.
You must either add a leading zero so that Ansible's YAML parser knows it is an octal number
- (like C(0644) or C(01777))or quote it (like C('644') or C('1777')) so Ansible receives a string
+ (like C(0644) or C(01777)) or quote it (like C('644') or C('1777')) so Ansible receives a string
and can do its own conversion from string into number. Giving Ansible a number without following
one of these rules will end up with a decimal number which will have unexpected results.
- As of Ansible 1.8, the mode may be specified as a symbolic mode (for example, C(u+rwx) or C(u=rw,g=r,o=r)).
- As of Ansible 2.3, the mode may also be the special string C(preserve).
- C(preserve) means that the file will be given the same permissions as the source file.
- When doing a recursive copy, see also C(directory_mode).
- type: path
+ - If C(mode) is not specified and the destination file B(does not) exist, the default C(umask) on the system will be used
+ when setting the mode for the newly created file.
+ - If C(mode) is not specified and the destination file B(does) exist, the mode of the existing file will be used.
+ - Specifying C(mode) is the best way to ensure files are created with the correct permissions.
+ See CVE-2020-1736 for further details.
directory_mode:
description:
- When doing a recursive copy set the mode for the directories.
@@ -86,10 +90,11 @@ options:
remote_src:
description:
- Influence whether C(src) needs to be transferred or already is present remotely.
- - If C(no), it will search for C(src) at originating/master machine.
- - If C(yes) it will go to the remote/target machine for the C(src).
+ - If C(no), it will search for C(src) on the controller node.
+ - If C(yes) it will search for C(src) on the managed (remote) node.
- C(remote_src) supports recursive copying as of version 2.8.
- C(remote_src) only works with C(mode=preserve) as of version 2.6.
+ - Autodecryption of files does not work when C(remote_src=yes).
type: bool
default: no
version_added: '2.0'
diff --git a/lib/ansible/modules/cron.py b/lib/ansible/modules/cron.py
index 2424f5c0..62189636 100644
--- a/lib/ansible/modules/cron.py
+++ b/lib/ansible/modules/cron.py
@@ -291,7 +291,10 @@ class CronTab(object):
if len(self.lines) == 0:
return True
else:
- return False
+ for line in self.lines:
+ if line.strip():
+ return False
+ return True
def write(self, backup_file=None):
"""
@@ -675,18 +678,6 @@ def main():
(backuph, backup_file) = tempfile.mkstemp(prefix='crontab')
crontab.write(backup_file)
- if crontab.cron_file and not do_install:
- if module._diff:
- diff['after'] = ''
- diff['after_header'] = '/dev/null'
- else:
- diff = dict()
- if module.check_mode:
- changed = os.path.isfile(crontab.cron_file)
- else:
- changed = crontab.remove_job_file()
- module.exit_json(changed=changed, cron_file=cron_file, state=state, diff=diff)
-
if env:
if ' ' in name:
module.fail_json(msg="Invalid name for environment variable")
@@ -729,6 +720,17 @@ def main():
if len(old_job) > 0:
crontab.remove_job(name)
changed = True
+ if crontab.cron_file and crontab.is_empty():
+ if module._diff:
+ diff['after'] = ''
+ diff['after_header'] = '/dev/null'
+ else:
+ diff = dict()
+ if module.check_mode:
+ changed = os.path.isfile(crontab.cron_file)
+ else:
+ changed = crontab.remove_job_file()
+ module.exit_json(changed=changed, cron_file=cron_file, state=state, diff=diff)
# no changes to env/job, but existing crontab needs a terminating newline
if not changed and crontab.n_existing != '':
diff --git a/lib/ansible/modules/dnf.py b/lib/ansible/modules/dnf.py
index 8ffe61ad..da88ade1 100644
--- a/lib/ansible/modules/dnf.py
+++ b/lib/ansible/modules/dnf.py
@@ -25,6 +25,7 @@ options:
When using state=latest, this can be '*' which means run: dnf -y update.
You can also pass a url or a local path to a rpm file.
To operate on several packages this can accept a comma separated string of packages or a list of packages."
+      - Comparison operators for package versions are valid here: C(>), C(<), C(>=), C(<=). Example - C(name>=1.0)
required: true
aliases:
- pkg
@@ -34,6 +35,7 @@ options:
list:
description:
- Various (non-idempotent) commands for usage with C(/usr/bin/ansible) and I(not) playbooks. See examples.
+ type: str
state:
description:
@@ -41,22 +43,28 @@ options:
- Default is C(None), however in effect the default action is C(present) unless the C(autoremove) option is
enabled for this module, then C(absent) is inferred.
choices: ['absent', 'present', 'installed', 'removed', 'latest']
+ type: str
enablerepo:
description:
- I(Repoid) of repositories to enable for the install/update operation.
These repos will not persist beyond the transaction.
When specifying multiple repos, separate them with a ",".
+ type: list
+ elements: str
disablerepo:
description:
- I(Repoid) of repositories to disable for the install/update operation.
These repos will not persist beyond the transaction.
When specifying multiple repos, separate them with a ",".
+ type: list
+ elements: str
conf_file:
description:
- The remote dnf configuration file to use for the transaction.
+ type: str
disable_gpg_check:
description:
@@ -73,12 +81,14 @@ options:
will be installed.
version_added: "2.3"
default: "/"
+ type: str
releasever:
description:
- Specifies an alternative release from which all packages will be
installed.
version_added: "2.6"
+ type: str
autoremove:
description:
@@ -93,6 +103,8 @@ options:
- Package name(s) to exclude when state=present, or latest. This can be a
list or a comma separated string.
version_added: "2.7"
+ type: list
+ elements: str
skip_broken:
description:
- Skip packages with broken dependencies(devsolve) and are causing problems.
@@ -133,11 +145,15 @@ options:
- I(Plugin) name to enable for the install/update operation.
The enabled plugin will not persist beyond the transaction.
version_added: "2.7"
+ type: list
+ elements: str
disable_plugin:
description:
- I(Plugin) name to disable for the install/update operation.
The disabled plugins will not persist beyond the transaction.
version_added: "2.7"
+ type: list
+ elements: str
disable_excludes:
description:
- Disable the excludes defined in DNF config files.
@@ -145,6 +161,7 @@ options:
- If set to C(main), disable excludes defined in [main] in dnf.conf.
- If set to C(repoid), disable excludes defined for given repo id.
version_added: "2.7"
+ type: str
validate_certs:
description:
- This only applies if using a https url as the source of the rpm. e.g. for localinstall. If set to C(no), the SSL certificates will not be validated.
@@ -204,6 +221,13 @@ options:
type: bool
default: "no"
version_added: "2.10"
+ nobest:
+ description:
+ - Set best option to False, so that transactions are not limited to best candidates only.
+ required: false
+ type: bool
+ default: "no"
+ version_added: "2.11"
notes:
- When used with a `loop:` each package will be processed individually, it is much more efficient to pass the list directly to the `name` option.
- Group removal doesn't work if the group was installed with Ansible because
@@ -227,6 +251,11 @@ EXAMPLES = '''
name: httpd
state: latest
+- name: Install Apache >= 2.4
+ dnf:
+ name: httpd>=2.4
+ state: present
+
- name: Install the latest version of Apache and MariaDB
dnf:
name:
@@ -295,6 +324,15 @@ import os
import re
import sys
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.urls import fetch_file
+from ansible.module_utils.six import PY2, text_type
+from distutils.version import LooseVersion
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.respawn import has_respawned, probe_interpreters_for_module, respawn_module
+from ansible.module_utils.yumdnf import YumDnf, yumdnf_argument_spec
+
try:
import dnf
import dnf.cli
@@ -306,14 +344,6 @@ try:
except ImportError:
HAS_DNF = False
-from ansible.module_utils._text import to_native, to_text
-from ansible.module_utils.urls import fetch_file
-from ansible.module_utils.six import PY2, text_type
-from distutils.version import LooseVersion
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.yumdnf import YumDnf, yumdnf_argument_spec
-
class DnfModule(YumDnf):
"""
@@ -335,6 +365,7 @@ class DnfModule(YumDnf):
# DNF specific args that are not part of YumDnf
self.allowerasing = self.module.params['allowerasing']
+ self.nobest = self.module.params['nobest']
def is_lockfile_pid_valid(self):
# FIXME? it looks like DNF takes care of invalid lock files itself?
@@ -391,15 +422,7 @@ class DnfModule(YumDnf):
return result
- def _packagename_dict(self, packagename):
- """
- Return a dictionary of information for a package name string or None
- if the package name doesn't contain at least all NVR elements
- """
-
- if packagename[-4:] == '.rpm':
- packagename = packagename[:-4]
-
+ def _split_package_arch(self, packagename):
# This list was auto generated on a Fedora 28 system with the following one-liner
# printf '[ '; for arch in $(ls /usr/lib/rpm/platform); do printf '"%s", ' ${arch%-linux}; done; printf ']\n'
redhat_rpm_arches = [
@@ -414,15 +437,26 @@ class DnfModule(YumDnf):
"sparc", "sparcv8", "sparcv9", "sparcv9v", "x86_64"
]
- rpm_arch_re = re.compile(r'(.*)\.(.*)')
+ name, delimiter, arch = packagename.rpartition('.')
+ if name and arch and arch in redhat_rpm_arches:
+ return name, arch
+ return packagename, None
+
+ def _packagename_dict(self, packagename):
+ """
+ Return a dictionary of information for a package name string or None
+ if the package name doesn't contain at least all NVR elements
+ """
+
+ if packagename[-4:] == '.rpm':
+ packagename = packagename[:-4]
+
rpm_nevr_re = re.compile(r'(\S+)-(?:(\d*):)?(.*)-(~?\w+[\w.+]*)')
try:
arch = None
- rpm_arch_match = rpm_arch_re.match(packagename)
- if rpm_arch_match:
- nevr, arch = rpm_arch_match.groups()
- if arch in redhat_rpm_arches:
- packagename = nevr
+ nevr, arch = self._split_package_arch(packagename)
+ if arch:
+ packagename = nevr
rpm_nevr_match = rpm_nevr_re.match(packagename)
if rpm_nevr_match:
name, epoch, version, release = rpm_nevr_re.match(packagename).groups()
@@ -479,40 +513,31 @@ class DnfModule(YumDnf):
return rc
def _ensure_dnf(self):
- if not HAS_DNF:
- if PY2:
- package = 'python2-dnf'
- else:
- package = 'python3-dnf'
-
- if self.module.check_mode:
- self.module.fail_json(
- msg="`{0}` is not installed, but it is required"
- "for the Ansible dnf module.".format(package),
- results=[],
- )
-
- rc, stdout, stderr = self.module.run_command(['dnf', 'install', '-y', package])
- global dnf
- try:
- import dnf
- import dnf.cli
- import dnf.const
- import dnf.exceptions
- import dnf.subject
- import dnf.util
- except ImportError:
- self.module.fail_json(
- msg="Could not import the dnf python module using {0} ({1}). "
- "Please install `{2}` package or ensure you have specified the "
- "correct ansible_python_interpreter.".format(sys.executable, sys.version.replace('\n', ''),
- package),
- results=[],
- cmd='dnf install -y {0}'.format(package),
- rc=rc,
- stdout=stdout,
- stderr=stderr,
- )
+ if HAS_DNF:
+ return
+
+ system_interpreters = ['/usr/libexec/platform-python',
+ '/usr/bin/python3',
+ '/usr/bin/python2',
+ '/usr/bin/python']
+
+ if not has_respawned():
+ # probe well-known system Python locations for accessible bindings, favoring py3
+ interpreter = probe_interpreters_for_module(system_interpreters, 'dnf')
+
+ if interpreter:
+ # respawn under the interpreter where the bindings should be found
+ respawn_module(interpreter)
+ # end of the line for this module, the process will exit here once the respawned module completes
+
+ # done all we can do, something is just broken (auto-install isn't useful anymore with respawn, so it was removed)
+ self.module.fail_json(
+ msg="Could not import the dnf python module using {0} ({1}). "
+ "Please install `python3-dnf` or `python2-dnf` package or ensure you have specified the "
+ "correct ansible_python_interpreter. (attempted {2})"
+ .format(sys.executable, sys.version.replace('\n', ''), system_interpreters),
+ results=[]
+ )
def _configure_base(self, base, conf_file, disable_gpg_check, installroot='/'):
"""Configure the dnf Base object."""
@@ -578,6 +603,10 @@ class DnfModule(YumDnf):
if self.skip_broken:
conf.strict = 0
+ # Set best
+ if self.nobest:
+ conf.best = 0
+
if self.download_only:
conf.downloadonly = True
if self.download_dir:
@@ -682,7 +711,20 @@ class DnfModule(YumDnf):
def _is_installed(self, pkg):
installed = self.base.sack.query().installed()
- if installed.filter(name=pkg):
+
+ package_spec = {}
+ name, arch = self._split_package_arch(pkg)
+ if arch:
+ package_spec['arch'] = arch
+
+ package_details = self._packagename_dict(pkg)
+ if package_details:
+ package_details['epoch'] = int(package_details['epoch'])
+ package_spec.update(package_details)
+ else:
+ package_spec['name'] = name
+
+ if installed.filter(**package_spec):
return True
else:
return False
@@ -1121,21 +1163,11 @@ class DnfModule(YumDnf):
response['results'].append(handled_remove_error)
continue
- installed_pkg = list(map(str, installed.filter(name=pkg_spec).run()))
- if installed_pkg:
- candidate_pkg = self._packagename_dict(installed_pkg[0])
- installed_pkg = installed.filter(name=candidate_pkg['name']).run()
- else:
- candidate_pkg = self._packagename_dict(pkg_spec)
- installed_pkg = installed.filter(nevra=pkg_spec).run()
- if installed_pkg:
- installed_pkg = installed_pkg[0]
- evr_cmp = self._compare_evr(
- installed_pkg.epoch, installed_pkg.version, installed_pkg.release,
- candidate_pkg['epoch'], candidate_pkg['version'], candidate_pkg['release'],
- )
- if evr_cmp == 0:
- self.base.remove(pkg_spec)
+ installed_pkg = dnf.subject.Subject(pkg_spec).get_best_query(
+ sack=self.base.sack).installed().run()
+
+ for pkg in installed_pkg:
+ self.base.remove(str(pkg))
# Like the dnf CLI we want to allow recursive removal of dependent
# packages
@@ -1309,6 +1341,7 @@ def main():
# Extend yumdnf_argument_spec with dnf-specific features that will never be
# backported to yum because yum is now in "maintenance mode" upstream
yumdnf_argument_spec['argument_spec']['allowerasing'] = dict(default=False, type='bool')
+ yumdnf_argument_spec['argument_spec']['nobest'] = dict(default=False, type='bool')
module = AnsibleModule(
**yumdnf_argument_spec
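The replacement for the old regex-based arch split only strips a trailing token when it is a known rpm architecture, so package names containing dots survive intact; a sketch with an abbreviated arch list (the module uses the full generated list):

    def split_package_arch(packagename, known_arches=('noarch', 'i686', 'x86_64', 'aarch64', 'ppc64le', 's390x')):
        name, _, arch = packagename.rpartition('.')
        if name and arch in known_arches:
            return name, arch
        return packagename, None

    # split_package_arch('bash-5.0-1.fc31.x86_64')  -> ('bash-5.0-1.fc31', 'x86_64')
    # split_package_arch('python3.9')               -> ('python3.9', None)
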
diff --git a/lib/ansible/modules/dpkg_selections.py b/lib/ansible/modules/dpkg_selections.py
index 9043786e..a51b9e58 100644
--- a/lib/ansible/modules/dpkg_selections.py
+++ b/lib/ansible/modules/dpkg_selections.py
@@ -20,13 +20,15 @@ author:
options:
name:
description:
- - Name of the package
+ - Name of the package.
required: true
+ type: str
selection:
description:
- The selection state to set the package to.
choices: [ 'install', 'hold', 'deinstall', 'purge' ]
required: true
+ type: str
notes:
- This module won't cause any packages to be installed/removed/purged, use the C(apt) module for that.
'''
@@ -44,7 +46,7 @@ def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
- selection=dict(choices=['install', 'hold', 'deinstall', 'purge'])
+ selection=dict(choices=['install', 'hold', 'deinstall', 'purge'], required=True)
),
supports_check_mode=True,
)
diff --git a/lib/ansible/modules/expect.py b/lib/ansible/modules/expect.py
index 3bb9d960..290ffa9d 100644
--- a/lib/ansible/modules/expect.py
+++ b/lib/ansible/modules/expect.py
@@ -23,6 +23,7 @@ options:
description:
- The command module takes command to run.
required: true
+ type: str
creates:
type: path
description:
@@ -62,6 +63,9 @@ notes:
C(/bin/bash -c "/path/to/something | grep else").
- The question, or key, under I(responses) is a python regex match. Case
insensitive searches are indicated with a prefix of C(?i).
+ - The C(pexpect) library used by this module operates with a search window
+ of 2000 bytes, and does not use a multiline regex match. To perform a
+ start of line bound match, use a pattern like ``(?m)^pattern``
- By default, if a question is encountered multiple times, its string
response will be repeated. If you need different responses for successive
question matches, instead of a string response, use a list of strings as
@@ -107,11 +111,11 @@ except ImportError:
HAS_PEXPECT = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils._text import to_bytes, to_native, to_text
def response_closure(module, question, responses):
- resp_gen = (u'%s\n' % to_text(r).rstrip(u'\n') for r in responses)
+ resp_gen = (b'%s\n' % to_bytes(r).rstrip(b'\n') for r in responses)
def wrapped(info):
try:
@@ -155,9 +159,9 @@ def main():
if isinstance(value, list):
response = response_closure(module, key, value)
else:
- response = u'%s\n' % to_text(value).rstrip(u'\n')
+ response = b'%s\n' % to_bytes(value).rstrip(b'\n')
- events[to_text(key)] = response
+ events[to_bytes(key)] = response
if args.strip() == '':
module.fail_json(rc=256, msg="no command given")
@@ -195,13 +199,18 @@ def main():
try:
try:
# Prefer pexpect.run from pexpect>=4
- out, rc = pexpect.run(args, timeout=timeout, withexitstatus=True,
- events=events, cwd=chdir, echo=echo,
- encoding='utf-8')
+ b_out, rc = pexpect.run(args, timeout=timeout, withexitstatus=True,
+ events=events, cwd=chdir, echo=echo,
+ encoding=None)
except TypeError:
- # Use pexpect.runu in pexpect>=3.3,<4
- out, rc = pexpect.runu(args, timeout=timeout, withexitstatus=True,
- events=events, cwd=chdir, echo=echo)
+ # Use pexpect._run in pexpect>=3.3,<4
+ # pexpect.run doesn't support `echo`
+ # pexpect.runu doesn't support encoding=None
+ b_out, rc = pexpect._run(args, timeout=timeout, withexitstatus=True,
+ events=events, extra_args=None, logfile=None,
+ cwd=chdir, env=None, _spawn=pexpect.spawn,
+ echo=echo)
+
except (TypeError, AttributeError) as e:
# This should catch all insufficient versions of pexpect
# We deem them insufficient for their lack of ability to specify
@@ -216,12 +225,12 @@ def main():
endd = datetime.datetime.now()
delta = endd - startd
- if out is None:
- out = ''
+ if b_out is None:
+ b_out = b''
result = dict(
cmd=args,
- stdout=out.rstrip('\r\n'),
+ stdout=to_native(b_out).rstrip('\r\n'),
rc=rc,
start=str(startd),
end=str(endd),
diff --git a/lib/ansible/modules/file.py b/lib/ansible/modules/file.py
index f55f6a37..347b4e94 100644
--- a/lib/ansible/modules/file.py
+++ b/lib/ansible/modules/file.py
@@ -36,9 +36,9 @@ options:
not exist as the state did not change.
- If C(directory), all intermediate subdirectories will be created if they
do not exist. Since Ansible 1.7 they will be created with the supplied permissions.
- - If C(file), without any other options this works mostly as a 'stat' and will return the current state of C(path).
- Even with other options (i.e C(mode)), the file will be modified but will NOT be created if it does not exist;
- see the C(touch) value or the M(ansible.builtin.copy) or M(ansible.builtin.template) module if you want that behavior.
+ - If C(file), with no other options, returns the current state of C(path).
+ - If C(file), even with other options (such as C(mode)), the file will be modified if it exists but will NOT be created if it does not exist.
+ Set to C(touch) or use the M(ansible.builtin.copy) or M(ansible.builtin.template) module if you want to create the file if it does not exist.
- If C(hard), the hard link will be created or changed.
- If C(link), the symbolic link will be created or changed.
- If C(touch) (new in 1.4), an empty file will be created if the C(path) does not
@@ -227,6 +227,9 @@ import shutil
import sys
import time
+from pwd import getpwnam, getpwuid
+from grp import getgrnam, getgrgid
+
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_bytes, to_native
@@ -240,7 +243,7 @@ class AnsibleModuleError(Exception):
self.results = results
def __repr__(self):
- print('AnsibleModuleError(results={0})'.format(self.results))
+ return 'AnsibleModuleError(results={0})'.format(self.results)
class ParameterError(AnsibleModuleError):
@@ -561,7 +564,7 @@ def execute_touch(path, follow, timestamps):
changed = module.set_fs_attributes_if_different(file_args, changed, diff, expand=False)
changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
except SystemExit as e:
- if e.code:
+ if e.code: # this is the exit code passed to sys.exit, not a constant -- pylint: disable=using-constant-test
# We take this to mean that fail_json() was called from
# somewhere in basic.py
if prev_state == 'absent':
@@ -682,7 +685,7 @@ def ensure_symlink(path, src, follow, force, timestamps):
if src is None:
if follow:
# use the current target of the link as the source
- src = to_native(os.path.realpath(b_path), errors='strict')
+ src = to_native(os.readlink(b_path), errors='strict')
b_src = to_bytes(src, errors='surrogate_or_strict')
if not os.path.islink(b_path) and os.path.isdir(b_path):
@@ -862,6 +865,34 @@ def ensure_hardlink(path, src, follow, force, timestamps):
return {'dest': path, 'src': src, 'changed': changed, 'diff': diff}
+def check_owner_exists(module, owner):
+ try:
+ uid = int(owner)
+ try:
+ getpwuid(uid).pw_name
+ except KeyError:
+ module.warn('failed to look up user with uid %s. Create user up to this point in real play' % uid)
+ except ValueError:
+ try:
+ getpwnam(owner).pw_uid
+ except KeyError:
+ module.warn('failed to look up user %s. Create user up to this point in real play' % owner)
+
+
+def check_group_exists(module, group):
+ try:
+ gid = int(group)
+ try:
+ getgrgid(gid).gr_name
+ except KeyError:
+ module.warn('failed to look up group with gid %s. Create group up to this point in real play' % gid)
+ except ValueError:
+ try:
+ getgrnam(group).gr_gid
+ except KeyError:
+ module.warn('failed to look up group %s. Create group up to this point in real play' % group)
+
+
def main():
global module
@@ -897,6 +928,13 @@ def main():
path = params['path']
src = params['src']
+ if module.check_mode and state != 'absent':
+ file_args = module.load_file_common_arguments(module.params)
+ if file_args['owner']:
+ check_owner_exists(module, file_args['owner'])
+ if file_args['group']:
+ check_group_exists(module, file_args['group'])
+
timestamps = {}
timestamps['modification_time'] = keep_backward_compatibility_on_timestamps(params['modification_time'], state)
timestamps['modification_time_format'] = params['modification_time_format']
diff --git a/lib/ansible/modules/find.py b/lib/ansible/modules/find.py
index 3b52ac0a..ca8b483b 100644
--- a/lib/ansible/modules/find.py
+++ b/lib/ansible/modules/find.py
@@ -29,7 +29,7 @@ options:
first letter of any of those words (e.g., "1w").
type: str
patterns:
- default: '*'
+ default: []
description:
- One or more (shell or regex) patterns, which type is controlled by C(use_regex) option.
- The patterns restrict the list of files to be returned to those whose basenames match at
@@ -41,8 +41,10 @@ options:
- This parameter expects a list, which can be either comma separated or YAML. If any of the
patterns contain a comma, make sure to put them in a list to avoid splitting the patterns
in undesirable ways.
+ - Defaults to '*' when C(use_regex=False), or '.*' when C(use_regex=True).
type: list
aliases: [ pattern ]
+ elements: str
excludes:
description:
- One or more (shell or regex) patterns, which type is controlled by C(use_regex) option.
@@ -51,16 +53,28 @@ options:
type: list
aliases: [ exclude ]
version_added: "2.5"
+ elements: str
contains:
description:
- A regular expression or pattern which should be matched against the file content.
+ - Works only when I(file_type) is C(file).
type: str
+ read_whole_file:
+ description:
+ - When doing a C(contains) search, determines whether the whole file should be read into
+ memory or if the regex should be applied to the file line-by-line.
+ - Setting this to C(true) can have performance and memory implications for large files.
+ - This uses C(re.search()) instead of C(re.match()).
+ type: bool
+ default: false
+ version_added: "2.11"
paths:
description:
- List of paths of directories to search. All paths must be fully qualified.
type: list
required: true
aliases: [ name, path ]
+ elements: str
file_type:
description:
- Type of file to select.
@@ -80,6 +94,7 @@ options:
- Unqualified values are in bytes but b, k, m, g, and t can be appended to specify
bytes, kilobytes, megabytes, gigabytes, and terabytes, respectively.
- Size is not evaluated for directories.
+ type: str
age_stamp:
description:
- Choose the file property against which we compare age.
@@ -215,16 +230,17 @@ import re
import stat
import time
+from ansible.module_utils._text import to_text, to_native
from ansible.module_utils.basic import AnsibleModule
def pfilter(f, patterns=None, excludes=None, use_regex=False):
'''filter using glob patterns'''
- if patterns is None and excludes is None:
+ if not patterns and not excludes:
return True
if use_regex:
- if patterns and excludes is None:
+ if patterns and not excludes:
for p in patterns:
r = re.compile(p)
if r.match(f):
@@ -241,7 +257,7 @@ def pfilter(f, patterns=None, excludes=None, use_regex=False):
return True
else:
- if patterns and excludes is None:
+ if patterns and not excludes:
for p in patterns:
if fnmatch.fnmatch(f, p):
return True
@@ -279,11 +295,12 @@ def sizefilter(st, size):
return False
-def contentfilter(fsname, pattern):
+def contentfilter(fsname, pattern, read_whole_file=False):
"""
Filter files which contain the given expression
:arg fsname: Filename to scan for lines matching a pattern
:arg pattern: Pattern to look for inside of line
+ :arg read_whole_file: If true, the whole file is read into memory before the regex is applied against it. Otherwise, the regex is applied line-by-line.
:rtype: bool
:returns: True if one of the lines in fsname matches the pattern. Otherwise False
"""
@@ -294,6 +311,9 @@ def contentfilter(fsname, pattern):
try:
with open(fsname) as f:
+ if read_whole_file:
+ return bool(prog.search(f.read()))
+
for line in f:
if prog.match(line):
return True
@@ -355,10 +375,11 @@ def statinfo(st):
def main():
module = AnsibleModule(
argument_spec=dict(
- paths=dict(type='list', required=True, aliases=['name', 'path']),
- patterns=dict(type='list', default=['*'], aliases=['pattern']),
- excludes=dict(type='list', aliases=['exclude']),
+ paths=dict(type='list', required=True, aliases=['name', 'path'], elements='str'),
+ patterns=dict(type='list', default=[], aliases=['pattern'], elements='str'),
+ excludes=dict(type='list', aliases=['exclude'], elements='str'),
contains=dict(type='str'),
+ read_whole_file=dict(type='bool', default=False),
file_type=dict(type='str', default="file", choices=['any', 'directory', 'file', 'link']),
age=dict(type='str'),
age_stamp=dict(type='str', default="mtime", choices=['atime', 'ctime', 'mtime']),
@@ -375,6 +396,16 @@ def main():
params = module.params
+ # Set the default match pattern to either a match-all glob or
+ # regex depending on use_regex being set. This makes sure if you
+ # set excludes: without a pattern pfilter gets something it can
+ # handle.
+ if not params['patterns']:
+ if params['use_regex']:
+ params['patterns'] = ['.*']
+ else:
+ params['patterns'] = ['*']
+
filelist = []
if params['age'] is None:
@@ -404,7 +435,10 @@ def main():
looked = 0
for npath in params['paths']:
npath = os.path.expanduser(os.path.expandvars(npath))
- if os.path.isdir(npath):
+ try:
+ if not os.path.isdir(npath):
+ raise Exception("'%s' is not a directory" % to_native(npath))
+
for root, dirs, files in os.walk(npath, followlinks=params['follow']):
looked = looked + len(files) + len(dirs)
for fsobj in (files + dirs):
@@ -421,8 +455,8 @@ def main():
try:
st = os.lstat(fsname)
- except Exception:
- msg += "%s was skipped as it does not seem to be a valid file or it cannot be accessed\n" % fsname
+ except (IOError, OSError) as e:
+ msg += "Skipped entry '%s' due to this access issue: %s\n" % (fsname, to_text(e))
continue
r = {'path': fsname}
@@ -443,7 +477,7 @@ def main():
elif stat.S_ISREG(st.st_mode) and params['file_type'] == 'file':
if pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and \
agefilter(st, now, age, params['age_stamp']) and \
- sizefilter(st, size) and contentfilter(fsname, params['contains']):
+ sizefilter(st, size) and contentfilter(fsname, params['contains'], params['read_whole_file']):
r.update(statinfo(st))
if params['get_checksum']:
@@ -458,8 +492,10 @@ def main():
if not params['recurse']:
break
- else:
- msg += "%s was skipped as it does not seem to be a valid directory or it cannot be accessed\n" % npath
+ except Exception as e:
+ warn = "Skipped '%s' path due to this access issue: %s\n" % (to_text(npath), to_text(e))
+ module.warn(warn)
+ msg += warn
matched = len(filelist)
module.exit_json(files=filelist, changed=False, msg=msg, matched=matched, examined=looked)
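With read_whole_file the contains pattern is matched with re.search() against the full contents rather than re.match() per line, which allows multi-line patterns at the cost of memory; roughly:

    import re

    def contentfilter(fsname, pattern, read_whole_file=False):
        if pattern is None:
            return True
        prog = re.compile(pattern)
        try:
            with open(fsname) as f:
                if read_whole_file:
                    # one search over everything; the pattern may span lines
                    return bool(prog.search(f.read()))
                for line in f:
                    # per-line mode keeps the historical re.match() (anchored at line start) semantics
                    if prog.match(line):
                        return True
        except Exception:
            pass
        return False
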
diff --git a/lib/ansible/modules/gather_facts.py b/lib/ansible/modules/gather_facts.py
index a019d835..42a463b7 100644
--- a/lib/ansible/modules/gather_facts.py
+++ b/lib/ansible/modules/gather_facts.py
@@ -13,7 +13,7 @@ module: gather_facts
version_added: 2.8
short_description: Gathers facts about remote hosts
description:
- - This module takes care of executing the configured facts modules, the default is to use the M(ansible.builtin.setup) module.
+ - This module takes care of executing the R(configured facts modules,FACTS_MODULES), the default is to use the M(ansible.builtin.setup) module.
- This module is automatically called by playbooks to gather useful variables about remote hosts that can be used in playbooks.
- It can also be executed directly by C(/usr/bin/ansible) to check what variables are available to a host.
- Ansible provides many I(facts) about the system, automatically.
@@ -38,6 +38,6 @@ RETURN = """
"""
EXAMPLES = """
-# Display facts from all hosts and store them indexed by I(hostname) at C(/tmp/facts).
+# Display facts from all hosts and store them indexed by hostname at /tmp/facts.
# ansible all -m gather_facts --tree /tmp/facts
"""
diff --git a/lib/ansible/modules/get_url.py b/lib/ansible/modules/get_url.py
index 9036b354..314f7e1d 100644
--- a/lib/ansible/modules/get_url.py
+++ b/lib/ansible/modules/get_url.py
@@ -18,8 +18,7 @@ description:
- By default, if an environment variable C(<protocol>_proxy) is set on
the target host, requests will be sent through that proxy. This
behaviour can be overridden by setting a variable for this task
- (see `setting the environment
- <https://docs.ansible.com/playbooks_environment.html>`_),
+ (see R(setting the environment,playbooks_environment)),
or by using the use_proxy option.
- HTTP redirects can redirect from HTTP to HTTPS so you should be sure that
your proxy environment for both protocols is correct.
@@ -78,6 +77,7 @@ options:
This option is deprecated and will be removed in version 2.14. Use
option C(checksum) instead.
default: ''
+ type: str
version_added: "1.3"
checksum:
description:
@@ -165,6 +165,17 @@ options:
- Header to identify as, generally appears in web server logs.
type: str
default: ansible-httpget
+ use_gssapi:
+ description:
+ - Use GSSAPI to perform the authentication, typically this is for Kerberos or Kerberos through Negotiate
+ authentication.
+ - Requires the Python library L(gssapi,https://github.com/pythongssapi/python-gssapi) to be installed.
+ - Credentials for GSSAPI can be specified with I(url_username)/I(url_password) or with the GSSAPI env var
+      C(KRB5CCNAME) that specifies a custom Kerberos credential cache.
+ - NTLM authentication is C(not) supported even if the GSSAPI mech for NTLM has been installed.
+ type: bool
+ default: no
+ version_added: '2.11'
# informational: requirements for nodes
extends_documentation_fragment:
- files
@@ -346,16 +357,12 @@ def url_filename(url):
return fn
-def url_get(module, url, dest, use_proxy, last_mod_time, force, timeout=10, headers=None, tmp_dest=''):
+def url_get(module, url, dest, use_proxy, last_mod_time, force, timeout=10, headers=None, tmp_dest='', method='GET'):
"""
Download data from the url and store in a temporary file.
Return (tempfile, info about the request)
"""
- if module.check_mode:
- method = 'HEAD'
- else:
- method = 'GET'
start = datetime.datetime.utcnow()
rsp, info = fetch_url(module, url, use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, timeout=timeout, headers=headers, method=method)
@@ -416,6 +423,14 @@ def extract_filename_from_headers(headers):
return res
+def is_url(checksum):
+ """
+ Returns True if checksum value has supported URL scheme, else False."""
+ supported_schemes = ('http', 'https', 'ftp', 'file')
+
+ return urlsplit(checksum).scheme in supported_schemes
+
+
# ==============================================================
# main
@@ -487,23 +502,30 @@ def main():
except ValueError:
module.fail_json(msg="The checksum parameter has to be in format <algorithm>:<checksum>", **result)
- if checksum.startswith('http://') or checksum.startswith('https://') or checksum.startswith('ftp://'):
+ if is_url(checksum):
checksum_url = checksum
# download checksum file to checksum_tmpsrc
checksum_tmpsrc, checksum_info = url_get(module, checksum_url, dest, use_proxy, last_mod_time, force, timeout, headers, tmp_dest)
with open(checksum_tmpsrc) as f:
lines = [line.rstrip('\n') for line in f]
os.remove(checksum_tmpsrc)
- checksum_map = {}
+ checksum_map = []
for line in lines:
- parts = line.split(None, 1)
+ # Split by one whitespace to keep the leading type char ' ' (whitespace) for text and '*' for binary
+ parts = line.split(" ", 1)
if len(parts) == 2:
- checksum_map[parts[0]] = parts[1]
+                    # Remove the leading type char (' ' for text, '*' for binary) if present
+ if parts[1].startswith((" ", "*",)):
+ parts[1] = parts[1][1:]
+
+ # Append checksum and path without potential leading './'
+ checksum_map.append((parts[0], parts[1].lstrip("./")))
+
filename = url_filename(url)
# Look through each line in the checksum file for a hash corresponding to
# the filename in the url, returning the first hash that is found.
- for cksum in (s for (s, f) in checksum_map.items() if f.strip('./') == filename):
+ for cksum in (s for (s, f) in checksum_map if f == filename):
checksum = cksum
break
else:
@@ -553,7 +575,8 @@ def main():
# download to tmpsrc
start = datetime.datetime.utcnow()
- tmpsrc, info = url_get(module, url, dest, use_proxy, last_mod_time, force, timeout, headers, tmp_dest)
+ method = 'HEAD' if module.check_mode else 'GET'
+ tmpsrc, info = url_get(module, url, dest, use_proxy, last_mod_time, force, timeout, headers, tmp_dest, method)
result['elapsed'] = (datetime.datetime.utcnow() - start).seconds
result['src'] = tmpsrc
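The checksum file is now parsed into (hash, path) pairs that tolerate the sha*sum type marker and a leading './'; one line boils down to the following sketch (parse_checksum_line() is an invented name):

    def parse_checksum_line(line):
        parts = line.split(" ", 1)          # split once so the type char of the path field survives
        if len(parts) != 2:
            return None
        cksum, path = parts
        if path.startswith((" ", "*")):     # ' ' marks text mode, '*' marks binary mode
            path = path[1:]
        return cksum, path.lstrip("./")     # drop a leading './' so the lookup by filename matches

    # parse_checksum_line("0cf6...  ./release.tar.gz") -> ('0cf6...', 'release.tar.gz')
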
diff --git a/lib/ansible/modules/getent.py b/lib/ansible/modules/getent.py
index 1997aa0d..e5c7aaf2 100644
--- a/lib/ansible/modules/getent.py
+++ b/lib/ansible/modules/getent.py
@@ -21,20 +21,24 @@ options:
description:
- The name of a getent database supported by the target system (passwd, group,
hosts, etc).
+ type: str
required: True
key:
description:
- Key from which to return values from the specified database, otherwise the
full contents are returned.
+ type: str
default: ''
service:
description:
- Override all databases with the specified service
- The underlying system must support the service flag which is not always available.
+ type: str
version_added: "2.9"
split:
description:
- "Character used to split the database values into lists/arrays such as ':' or '\t', otherwise it will try to pick one depending on the database."
+ type: str
fail_key:
description:
- If a supplied key is missing this will make the task fail if C(yes).
@@ -95,7 +99,7 @@ def main():
module = AnsibleModule(
argument_spec=dict(
database=dict(type='str', required=True),
- key=dict(type='str'),
+ key=dict(type='str', no_log=False),
service=dict(type='str'),
split=dict(type='str'),
fail_key=dict(type='bool', default=True),
diff --git a/lib/ansible/modules/git.py b/lib/ansible/modules/git.py
index 2c2b36db..96810086 100644
--- a/lib/ansible/modules/git.py
+++ b/lib/ansible/modules/git.py
@@ -22,19 +22,24 @@ options:
repo:
description:
- git, SSH, or HTTP(S) protocol address of the git repository.
+ type: str
required: true
aliases: [ name ]
dest:
description:
- The path of where the repository should be checked out. This
- parameter is required, unless C(clone) is set to C(no).
+ is equivalent to C(git clone [repo_url] [directory]). The repository
+ named in I(repo) is not appended to this path and the destination directory must be empty. This
+ parameter is required, unless I(clone) is set to C(no).
+ type: path
required: true
version:
description:
- - What version of the repository to check out. This can be
+ - What version of the repository to check out. This can be
the literal string C(HEAD), a branch name, a tag name.
It can also be a I(SHA-1) hash, in which case I(refspec) needs
to be specified if the given revision is not already available.
+ type: str
default: "HEAD"
accept_hostkey:
description:
@@ -48,12 +53,14 @@ options:
- Creates a wrapper script and exports the path as GIT_SSH
which git then automatically uses to override ssh arguments.
An example value could be "-o StrictHostKeyChecking=no"
- (although this particular option is better set via
- C(accept_hostkey)).
+ (although this particular option is better set by
+ I(accept_hostkey)).
+ type: str
version_added: "1.5"
key_file:
description:
- Specify an optional private key file path, on the target host, to use for the checkout.
+ type: path
version_added: "1.5"
reference:
description:
@@ -62,6 +69,7 @@ options:
remote:
description:
- Name of the remote.
+ type: str
default: "origin"
refspec:
description:
@@ -69,8 +77,9 @@ options:
If version is set to a I(SHA-1) not reachable from any branch
or tag, this option may be necessary to specify the ref containing
the I(SHA-1).
- Uses the same syntax as the 'git fetch' command.
+ Uses the same syntax as the C(git fetch) command.
An example value could be "refs/meta/config".
+ type: str
version_added: "1.9"
force:
description:
@@ -86,6 +95,7 @@ options:
- Create a shallow clone with a history truncated to the specified
number or revisions. The minimum possible value is C(1), otherwise
ignored. Needs I(git>=1.9.1) to work correctly.
+ type: int
version_added: "1.2"
clone:
description:
@@ -105,6 +115,7 @@ options:
description:
- Path to git executable to use. If not supplied,
the normal mechanism for resolving binary paths will be used.
+ type: path
version_added: "1.4"
bare:
description:
@@ -117,6 +128,7 @@ options:
description:
- The umask to set before doing any checkouts, or any other
repository maintenance.
+ type: raw
version_added: "2.2"
recursive:
@@ -127,6 +139,13 @@ options:
default: 'yes'
version_added: "1.6"
+ single_branch:
+ description:
+ - Clone only the history leading to the tip of the specified revision.
+ type: bool
+ default: 'no'
+ version_added: '2.11'
+
track_submodules:
description:
- If C(yes), submodules will track the latest commit on their
@@ -156,6 +175,7 @@ options:
Allowed archive formats ["zip", "tar.gz", "tar", "tgz"].
- This will clone and perform git archive from local directory as not
all git servers support git archive.
+ type: path
version_added: "2.4"
archive_prefix:
@@ -168,6 +188,7 @@ options:
description:
- The path to place the cloned repository. If specified, Git repository
can be separated from working tree.
+ type: path
version_added: "2.7"
gpg_whitelist:
@@ -177,6 +198,7 @@ options:
- Only used when I(verify_commit=yes).
- Use of this feature requires Git 2.6+ due to its reliance on git's C(--raw) flag to C(verify-commit) and C(verify-tag).
type: list
+ elements: str
default: []
version_added: "2.9"
@@ -234,6 +256,21 @@ EXAMPLES = '''
repo: https://github.com/ansible/ansible-examples.git
dest: /src/ansible-examples
separate_git_dir: /src/ansible-examples.git
+
+- name: Example clone of a single branch
+ ansible.builtin.git:
+ repo: https://github.com/ansible/ansible-examples.git
+ dest: /src/ansible-examples
+ single_branch: yes
+ version: master
+
+- name: Avoid hanging when http(s) password is missing
+ ansible.builtin.git:
+ repo: https://github.com/ansible/could-be-a-private-repo
+ dest: /src/from-private-repo
+ environment:
+ GIT_TERMINAL_PROMPT: 0 # reports "terminal prompts disabled" on missing password
+ # or GIT_ASKPASS: /bin/true # for git before version 2.3.0, reports "Authentication failed" on missing password
'''
RETURN = '''
@@ -256,7 +293,7 @@ warnings:
description: List of warnings if requested features were not available due to a too old git version.
returned: error
type: str
- sample: Your git version is too old to fully support the depth argument. Falling back to full checkouts.
+ sample: git version is too old to fully support the depth argument. Falling back to full checkouts.
git_dir_now:
description: Contains the new path of .git directory if it is changed.
returned: success
@@ -455,7 +492,7 @@ def get_submodule_versions(git_path, module, dest, version='HEAD'):
def clone(git_path, module, repo, dest, remote, depth, version, bare,
- reference, refspec, verify_commit, separate_git_dir, result, gpg_whitelist):
+ reference, refspec, git_version_used, verify_commit, separate_git_dir, result, gpg_whitelist, single_branch):
''' makes a new git repo if it does not already exist '''
dest_dirname = os.path.dirname(dest)
try:
@@ -468,11 +505,12 @@ def clone(git_path, module, repo, dest, remote, depth, version, bare,
cmd.append('--bare')
else:
cmd.extend(['--origin', remote])
+
+ is_branch_or_tag = is_remote_branch(git_path, module, dest, repo, version) or is_remote_tag(git_path, module, dest, repo, version)
if depth:
if version == 'HEAD' or refspec:
cmd.extend(['--depth', str(depth)])
- elif is_remote_branch(git_path, module, dest, repo, version) \
- or is_remote_tag(git_path, module, dest, repo, version):
+ elif is_branch_or_tag:
cmd.extend(['--depth', str(depth)])
cmd.extend(['--branch', version])
else:
@@ -482,12 +520,23 @@ def clone(git_path, module, repo, dest, remote, depth, version, bare,
"HEAD, branches, tags or in combination with refspec.")
if reference:
cmd.extend(['--reference', str(reference)])
- needs_separate_git_dir_fallback = False
+ if single_branch:
+ if git_version_used is None:
+ module.fail_json(msg='Cannot find git executable at %s' % git_path)
+
+ if git_version_used < LooseVersion('1.7.10'):
+ module.warn("git version '%s' is too old to use 'single-branch'. Ignoring." % git_version_used)
+ else:
+ cmd.append("--single-branch")
+
+ if is_branch_or_tag:
+ cmd.extend(['--branch', version])
+
+ needs_separate_git_dir_fallback = False
if separate_git_dir:
- git_version_used = git_version(git_path, module)
if git_version_used is None:
- module.fail_json(msg='Can not find git executable at %s' % git_path)
+ module.fail_json(msg='Cannot find git executable at %s' % git_path)
if git_version_used < LooseVersion('1.7.5'):
# git before 1.7.5 doesn't have separate-git-dir argument, do fallback
needs_separate_git_dir_fallback = True
@@ -1058,13 +1107,14 @@ def main():
clone=dict(default='yes', type='bool'),
update=dict(default='yes', type='bool'),
verify_commit=dict(default='no', type='bool'),
- gpg_whitelist=dict(default=[], type='list'),
+ gpg_whitelist=dict(default=[], type='list', elements='str'),
accept_hostkey=dict(default='no', type='bool'),
key_file=dict(default=None, type='path', required=False),
ssh_opts=dict(default=None, required=False),
executable=dict(default=None, type='path'),
bare=dict(default='no', type='bool'),
recursive=dict(default='yes', type='bool'),
+ single_branch=dict(default=False, type='bool'),
track_submodules=dict(default='no', type='bool'),
umask=dict(default=None, type='raw'),
archive=dict(type='path'),
@@ -1089,6 +1139,7 @@ def main():
verify_commit = module.params['verify_commit']
gpg_whitelist = module.params['gpg_whitelist']
reference = module.params['reference']
+ single_branch = module.params['single_branch']
git_path = module.params['executable'] or module.get_bin_path('git', True)
key_file = module.params['key_file']
ssh_opts = module.params['ssh_opts']
@@ -1161,7 +1212,7 @@ def main():
git_version_used = git_version(git_path, module)
if depth is not None and git_version_used < LooseVersion('1.9.1'):
- result['warnings'].append("Your git version is too old to fully support the depth argument. Falling back to full checkouts.")
+ module.warn("git version is too old to fully support the depth argument. Falling back to full checkouts.")
depth = None
recursive = module.params['recursive']
@@ -1184,7 +1235,8 @@ def main():
result['diff'] = diff
module.exit_json(**result)
# there's no git config, so clone
- clone(git_path, module, repo, dest, remote, depth, version, bare, reference, refspec, verify_commit, separate_git_dir, result, gpg_whitelist)
+ clone(git_path, module, repo, dest, remote, depth, version, bare, reference,
+ refspec, git_version_used, verify_commit, separate_git_dir, result, gpg_whitelist, single_branch)
elif not update:
# Just return having found a repo already in the dest path
# this does no checking that the repo is the actual repo
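For the new single_branch option, the extra clone arguments reduce to the sketch below; git_version_used is the LooseVersion the module already computes, and single_branch_args() is an invented name (the real code also fails early when no git executable was found):

    from distutils.version import LooseVersion

    def single_branch_args(version, is_branch_or_tag, git_version_used):
        args = []
        if git_version_used >= LooseVersion('1.7.10'):
            args.append('--single-branch')   # older git: the module warns and skips the flag
        if is_branch_or_tag:
            args.extend(['--branch', version])
        return args

    # single_branch_args('master', True, LooseVersion('2.30.0'))
    # -> ['--single-branch', '--branch', 'master']
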
diff --git a/lib/ansible/modules/group_by.py b/lib/ansible/modules/group_by.py
index 34290d02..8231b76a 100644
--- a/lib/ansible/modules/group_by.py
+++ b/lib/ansible/modules/group_by.py
@@ -31,6 +31,9 @@ options:
notes:
- Spaces in group names are converted to dashes '-'.
- This module is also supported for Windows targets.
+- Though this module does not change the remote host,
+ we do provide 'changed' status as it can be useful
+ for those trying to track inventory changes.
seealso:
- module: ansible.builtin.add_host
author:
diff --git a/lib/ansible/modules/hostname.py b/lib/ansible/modules/hostname.py
index f27e27ea..26556fc8 100644
--- a/lib/ansible/modules/hostname.py
+++ b/lib/ansible/modules/hostname.py
@@ -18,19 +18,29 @@ version_added: "1.4"
short_description: Manage hostname
requirements: [ hostname ]
description:
- - Set system's hostname, supports most OSs/Distributions, including those using systemd.
- - Note, this module does *NOT* modify C(/etc/hosts). You need to modify it yourself using other modules like template or replace.
- - Windows, HP-UX and AIX are not currently supported.
+ - Set system's hostname. Supports most OSs/Distributions including those using C(systemd).
+ - Windows, HP-UX, and AIX are not currently supported.
+notes:
+ - This module does B(NOT) modify C(/etc/hosts). You need to modify it yourself using other modules such as M(ansible.builtin.template)
+ or M(ansible.builtin.replace).
+ - On macOS, this module uses C(scutil) to set C(HostName), C(ComputerName), and C(LocalHostName). Since C(LocalHostName)
+ cannot contain spaces or most special characters, this module will replace characters when setting C(LocalHostName).
+ - Supports C(check_mode).
options:
name:
description:
- - Name of the host
+ - Name of the host.
+ - If the value is a fully qualified domain name that does not resolve from the given host,
+ this will cause the module to hang for a few seconds while waiting for the name resolution attempt to timeout.
+ type: str
required: true
use:
description:
- Which strategy to use to update the hostname.
- - If not set we try to autodetect, but this can be problematic, specially with containers as they can present misleading information.
- choices: ['generic', 'debian','sles', 'redhat', 'alpine', 'systemd', 'openrc', 'openbsd', 'solaris', 'freebsd']
+ - If not set we try to autodetect, but this can be problematic, particularly with containers as they can present misleading information.
+ - Note that 'systemd' should be specified for RHEL/EL/CentOS 7+. Older distributions should use 'redhat'.
+ choices: ['alpine', 'debian', 'freebsd', 'generic', 'macos', 'macosx', 'darwin', 'openbsd', 'openrc', 'redhat', 'sles', 'solaris', 'systemd']
+ type: str
version_added: '2.9'
'''
@@ -38,6 +48,11 @@ EXAMPLES = '''
- name: Set a hostname
ansible.builtin.hostname:
name: web01
+
+- name: Set a hostname specifying strategy
+ ansible.builtin.hostname:
+ name: web01
+ strategy: systemd
'''
import os
@@ -52,10 +67,24 @@ from ansible.module_utils.basic import (
)
from ansible.module_utils.common.sys_info import get_platform_subclass
from ansible.module_utils.facts.system.service_mgr import ServiceMgrFactCollector
-from ansible.module_utils._text import to_native
-
-STRATS = {'generic': 'Generic', 'debian': 'Debian', 'sles': 'SLES', 'redhat': 'RedHat', 'alpine': 'Alpine',
- 'systemd': 'Systemd', 'openrc': 'OpenRC', 'openbsd': 'OpenBSD', 'solaris': 'Solaris', 'freebsd': 'FreeBSD'}
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.six import PY3, text_type
+
+STRATS = {
+ 'alpine': 'Alpine',
+ 'debian': 'Debian',
+ 'freebsd': 'FreeBSD',
+ 'generic': 'Generic',
+ 'macos': 'Darwin',
+ 'macosx': 'Darwin',
+ 'darwin': 'Darwin',
+ 'openbsd': 'OpenBSD',
+ 'openrc': 'OpenRC',
+ 'redhat': 'RedHat',
+ 'sles': 'SLES',
+ 'solaris': 'Solaris',
+ 'systemd': 'Systemd',
+}
class UnimplementedStrategy(object):
@@ -155,9 +184,7 @@ class GenericStrategy(object):
def __init__(self, module):
self.module = module
self.changed = False
- self.hostname_cmd = self.module.get_bin_path('hostnamectl', False)
- if not self.hostname_cmd:
- self.hostname_cmd = self.module.get_bin_path('hostname', True)
+ self.hostname_cmd = self.module.get_bin_path('hostname', True)
def update_current_and_permanent_hostname(self):
self.update_current_hostname()
@@ -374,6 +401,10 @@ class SystemdStrategy(GenericStrategy):
the hostnamectl command.
"""
+ def __init__(self, module):
+ super(SystemdStrategy, self).__init__(module)
+ self.hostname_cmd = self.module.get_bin_path('hostnamectl', True)
+
def get_current_hostname(self):
cmd = [self.hostname_cmd, '--transient', 'status']
rc, out, err = self.module.run_command(cmd)
@@ -576,6 +607,116 @@ class FreeBSDStrategy(GenericStrategy):
f.close()
+class DarwinStrategy(GenericStrategy):
+ """
+ This is a macOS hostname manipulation strategy class. It uses
+ /usr/sbin/scutil to set ComputerName, HostName, and LocalHostName.
+
+ HostName corresponds to what most platforms consider to be hostname.
+ It controls the name used on the command line and SSH.
+
+ However, macOS also has LocalHostName and ComputerName settings.
+ LocalHostName controls the Bonjour/ZeroConf name, used by services
+ like AirDrop. This class implements a method, _scrub_hostname(), that mimics
+ the transformations macOS makes on hostnames when entered in the Sharing
+ preference pane. It replaces spaces with dashes and removes all special
+ characters.
+
+ ComputerName is the name used for user-facing GUI services, like the
+ System Preferences/Sharing pane and when users connect to the Mac over the network.
+ """
+
+ def __init__(self, module):
+ super(DarwinStrategy, self).__init__(module)
+ self.scutil = self.module.get_bin_path('scutil', True)
+ self.name_types = ('HostName', 'ComputerName', 'LocalHostName')
+ self.scrubbed_name = self._scrub_hostname(self.module.params['name'])
+
+ def _make_translation(self, replace_chars, replacement_chars, delete_chars):
+ if PY3:
+ return str.maketrans(replace_chars, replacement_chars, delete_chars)
+
+ if not isinstance(replace_chars, text_type) or not isinstance(replacement_chars, text_type):
+ raise ValueError('replace_chars and replacement_chars must both be strings')
+ if len(replace_chars) != len(replacement_chars):
+ raise ValueError('replacement_chars must be the same length as replace_chars')
+
+ table = dict(zip((ord(c) for c in replace_chars), replacement_chars))
+ for char in delete_chars:
+ table[ord(char)] = None
+
+ return table
+
+ def _scrub_hostname(self, name):
+ """
+ LocalHostName only accepts valid DNS characters while HostName and ComputerName
+ accept a much wider range of characters. This function aims to mimic how macOS
+ translates a friendly name to the LocalHostName.
+ """
+
+ # Replace all these characters with a single dash
+ name = to_text(name)
+ replace_chars = u'\'"~`!@#$%^&*(){}[]/=?+\\|-_ '
+ delete_chars = u".'"
+ table = self._make_translation(replace_chars, u'-' * len(replace_chars), delete_chars)
+ name = name.translate(table)
+
+ # Replace multiple dashes with a single dash
+ while '-' * 2 in name:
+ name = name.replace('-' * 2, '-')
+
+ name = name.rstrip('-')
+ return name
+
+ def get_current_hostname(self):
+ cmd = [self.scutil, '--get', 'HostName']
+ rc, out, err = self.module.run_command(cmd)
+ if rc != 0 and 'HostName: not set' not in err:
+ self.module.fail_json(msg="Failed to get current hostname rc=%d, out=%s, err=%s" % (rc, out, err))
+
+ return to_native(out).strip()
+
+ def get_permanent_hostname(self):
+ cmd = [self.scutil, '--get', 'ComputerName']
+ rc, out, err = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg="Failed to get permanent hostname rc=%d, out=%s, err=%s" % (rc, out, err))
+
+ return to_native(out).strip()
+
+ def set_permanent_hostname(self, name):
+ for hostname_type in self.name_types:
+ cmd = [self.scutil, '--set', hostname_type]
+ if hostname_type == 'LocalHostName':
+ cmd.append(to_native(self.scrubbed_name))
+ else:
+ cmd.append(to_native(name))
+ rc, out, err = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg="Failed to set {3} to '{2}': {0} {1}".format(to_native(out), to_native(err), to_native(name), hostname_type))
+
+ def set_current_hostname(self, name):
+ pass
+
+ def update_current_hostname(self):
+ pass
+
+ def update_permanent_hostname(self):
+ name = self.module.params['name']
+
+ # Get all the current host name values in the order of self.name_types
+ all_names = tuple(self.module.run_command([self.scutil, '--get', name_type])[1].strip() for name_type in self.name_types)
+
+ # Get the expected host name values based on the order in self.name_types
+ expected_names = tuple(self.scrubbed_name if n == 'LocalHostName' else name for n in self.name_types)
+
+ # Ensure all three names are updated
+ if all_names != expected_names:
+ if not self.module.check_mode:
+ self.set_permanent_hostname(name)
+ self.changed = True
+
+
class FedoraHostname(Hostname):
platform = 'Linux'
distribution = 'Fedora'
@@ -644,6 +785,12 @@ class ManjaroHostname(Hostname):
strategy_class = SystemdStrategy
+class ManjaroARMHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Manjaro-arm'
+ strategy_class = SystemdStrategy
+
+
class RHELHostname(Hostname):
platform = 'Linux'
distribution = 'Redhat'
@@ -674,6 +821,12 @@ class CloudlinuxHostname(Hostname):
strategy_class = RedHatStrategy
+class AlinuxHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Alinux'
+ strategy_class = RedHatStrategy
+
+
class CoreosHostname(Hostname):
platform = 'Linux'
distribution = 'Coreos'
@@ -734,6 +887,12 @@ class KaliHostname(Hostname):
strategy_class = DebianStrategy
+class ParrotHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Parrot'
+ strategy_class = DebianStrategy
+
+
class UbuntuHostname(Hostname):
platform = 'Linux'
distribution = 'Ubuntu'
@@ -812,12 +971,24 @@ class NeonHostname(Hostname):
strategy_class = DebianStrategy
+class DarwinHostname(Hostname):
+ platform = 'Darwin'
+ distribution = None
+ strategy_class = DarwinStrategy
+
+
class OsmcHostname(Hostname):
platform = 'Linux'
distribution = 'Osmc'
strategy_class = SystemdStrategy
+class PardusHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Pardus'
+ strategy_class = SystemdStrategy
+
+
class VoidLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Void'
@@ -830,6 +1001,12 @@ class PopHostname(Hostname):
strategy_class = DebianStrategy
+class RockyHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Rocky'
+ strategy_class = SystemdStrategy
+
+
def main():
module = AnsibleModule(
argument_spec=dict(
@@ -851,7 +1028,11 @@ def main():
name_before = current_hostname
elif name != permanent_hostname:
name_before = permanent_hostname
+ else:
+ name_before = permanent_hostname
+ # NOTE: socket.getfqdn() calls gethostbyaddr(socket.gethostname()), which can be
+ # slow to return if the name does not resolve correctly.
kw = dict(changed=changed, name=name,
ansible_facts=dict(ansible_hostname=name.split('.')[0],
ansible_nodename=name,
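
The _scrub_hostname() logic above is easier to see with a concrete value. A minimal standalone sketch (Python 3 assumed; the sample name is purely illustrative, and the dash collapsing mirrors the stated intent of replacing runs of dashes with a single dash):

    # Reduce a friendly macOS name to a Bonjour-safe LocalHostName
    name = "Johnny's iMac (2021)"
    replace_chars = '\'"~`!@#$%^&*(){}[]/=?+\\|-_ '
    table = str.maketrans(replace_chars, '-' * len(replace_chars), ".'")
    scrubbed = name.translate(table)
    while '--' in scrubbed:            # collapse repeated dashes
        scrubbed = scrubbed.replace('--', '-')
    print(scrubbed.rstrip('-'))        # Johnnys-iMac-2021
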
diff --git a/lib/ansible/modules/import_role.py b/lib/ansible/modules/import_role.py
index ea221d16..8de4f428 100644
--- a/lib/ansible/modules/import_role.py
+++ b/lib/ansible/modules/import_role.py
@@ -17,6 +17,7 @@ description:
between other tasks of the play.
- Most keywords, loops and conditionals will only be applied to the imported tasks, not to this statement itself. If
you want the opposite behavior, use M(ansible.builtin.include_role) instead.
+ - Does not work in handlers.
version_added: '2.4'
options:
name:
@@ -50,9 +51,15 @@ options:
type: str
default: main
version_added: '2.8'
+ rolespec_validate:
+ description:
+ - Perform role argument spec validation if an argument spec is defined.
+ type: bool
+ default: yes
+ version_added: '2.11'
notes:
- Handlers are made available to the whole play.
- - Since Ansible 2.7 variables defined in C(vars) and C(defaults) for the role are exposed at playbook parsing time.
+ - Since Ansible 2.7 variables defined in C(vars) and C(defaults) for the role are exposed to the play at playbook parsing time.
Due to this, these variables will be accessible to roles and tasks executed before the location of the
M(ansible.builtin.import_role) task.
- Unlike M(ansible.builtin.include_role) variable exposure is not configurable, and will always be exposed.
diff --git a/lib/ansible/modules/include_role.py b/lib/ansible/modules/include_role.py
index f1a0cf6f..663de725 100644
--- a/lib/ansible/modules/include_role.py
+++ b/lib/ansible/modules/include_role.py
@@ -15,11 +15,12 @@ module: include_role
short_description: Load and execute a role
description:
- Dynamically loads and executes a specified role as a task.
- - May be used only where Ansible tasks are allowed - inside C(pre_tasks), C(tasks), or C(post_tasks) playbook objects, or as a task inside a role.
+ - May be used only where Ansible tasks are allowed - inside C(pre_tasks), C(tasks), or C(post_tasks) play objects, or as a task inside a role.
- Task-level keywords, loops, and conditionals apply only to the C(include_role) statement itself.
- To apply keywords to the tasks within the role, pass them using the C(apply) option or use M(ansible.builtin.import_role) instead.
- Ignores some keywords, like C(until) and C(retries).
- This module is also supported for Windows targets.
+ - Does not work in handlers.
version_added: "2.2"
options:
apply:
@@ -53,10 +54,10 @@ options:
default: yes
public:
description:
- - This option dictates whether the role's C(vars) and C(defaults) are exposed to the playbook. If set to C(yes)
+ - This option dictates whether the role's C(vars) and C(defaults) are exposed to the play. If set to C(yes)
the variables will be available to tasks following the C(include_role) task. This functionality differs from
- standard variable exposure for roles listed under the C(roles) header or C(import_role) as they are exposed at
- playbook parsing time, and available to earlier roles and tasks as well.
+ standard variable exposure for roles listed under the C(roles) header or C(import_role) as they are exposed
+ to the play at playbook parsing time, and available to earlier roles and tasks as well.
type: bool
default: no
version_added: '2.7'
@@ -66,6 +67,12 @@ options:
type: str
default: main
version_added: '2.8'
+ rolespec_validate:
+ description:
+ - Perform role argument spec validation if an argument spec is defined.
+ type: bool
+ default: yes
+ version_added: '2.11'
notes:
- Handlers are made available to the whole play.
- Before Ansible 2.4, as with C(include), this task could be static or dynamic, If static, it implied that it won't
diff --git a/lib/ansible/modules/iptables.py b/lib/ansible/modules/iptables.py
index 994f6404..03122e07 100644
--- a/lib/ansible/modules/iptables.py
+++ b/lib/ansible/modules/iptables.py
@@ -118,10 +118,12 @@ options:
description:
- List of flags you want to examine.
type: list
+ elements: str
flags_set:
description:
- Flags to be set.
type: list
+ elements: str
match:
description:
- Specifies a match to use, that is, an extension module that tests for
@@ -130,6 +132,7 @@ options:
- Matches are evaluated first to last if specified as an array and work in short-circuit
fashion, i.e. if one extension yields false, evaluation will stop.
type: list
+ elements: str
default: []
jump:
description:
@@ -217,6 +220,13 @@ options:
This is only valid if the rule also specifies one of the following
protocols: tcp, udp, dccp or sctp."
type: str
+ destination_ports:
+ description:
+ - This specifies multiple destination port numbers or port ranges to match in the multiport module.
+ - It can only be used in conjunction with the protocols tcp, udp, udplite, dccp and sctp.
+ type: list
+ elements: str
+ version_added: "2.11"
to_ports:
description:
- This specifies a destination port or range of ports to use, without
@@ -268,6 +278,7 @@ options:
- A list of the connection states to match in the conntrack module.
- Possible values are C(INVALID), C(NEW), C(ESTABLISHED), C(RELATED), C(UNTRACKED), C(SNAT), C(DNAT).
type: list
+ elements: str
default: []
src_range:
description:
@@ -279,6 +290,22 @@ options:
- Specifies the destination IP range to match in the iprange module.
type: str
version_added: "2.8"
+ match_set:
+ description:
+ - Specifies a set name which can be defined by ipset.
+ - Must be used together with the match_set_flags parameter.
+ - When the C(!) argument is prepended then it inverts the rule.
+ - Uses the iptables set extension.
+ type: str
+ version_added: "2.11"
+ match_set_flags:
+ description:
+ - Specifies the necessary flags for the match_set parameter.
+ - Must be used together with the match_set parameter.
+ - Uses the iptables set extension.
+ type: str
+ choices: [ "src", "dst", "src,dst", "dst,src" ]
+ version_added: "2.11"
limit:
description:
- Specifies the maximum average number of matches to allow per second.
@@ -329,7 +356,9 @@ options:
- Set the policy for the chain to the given target.
- Only built-in chains can have policies.
- This parameter requires the C(chain) parameter.
- - Ignores all other parameters.
+ - If you specify this parameter, all other parameters will be ignored.
+ - This parameter is used to set the default policy for the given C(chain).
+ Do not confuse this with the C(jump) parameter.
type: str
choices: [ ACCEPT, DROP, QUEUE, RETURN ]
version_added: "2.2"
@@ -386,6 +415,14 @@ EXAMPLES = r'''
dst_range: 10.0.0.1-10.0.0.50
jump: ACCEPT
+- name: Allow source IPs defined in ipset "admin_hosts" on port 22
+ ansible.builtin.iptables:
+ chain: INPUT
+ match_set: admin_hosts
+ match_set_flags: src
+ destination_port: 22
+ jump: ACCEPT
+
- name: Tag all outbound tcp packets with DSCP mark 8
ansible.builtin.iptables:
chain: OUTPUT
@@ -411,6 +448,7 @@ EXAMPLES = r'''
action: insert
rule_num: 5
+# Think twice before running the following task, as it may lock you out of the target system
- name: Set the policy for the INPUT chain to DROP
ansible.builtin.iptables:
chain: INPUT
@@ -458,6 +496,16 @@ EXAMPLES = r'''
limit_burst: 20
log_prefix: "IPTABLES:INFO: "
log_level: info
+
+- name: Allow connections on multiple ports
+ ansible.builtin.iptables:
+ chain: INPUT
+ protocol: tcp
+ destination_ports:
+ - "80"
+ - "443"
+ - "8081:8083"
+ jump: ACCEPT
'''
import re
@@ -541,6 +589,8 @@ def construct_rule(params):
append_param(rule, params['log_prefix'], '--log-prefix', False)
append_param(rule, params['log_level'], '--log-level', False)
append_param(rule, params['to_destination'], '--to-destination', False)
+ append_match(rule, params['destination_ports'], 'multiport')
+ append_csv(rule, params['destination_ports'], '--dports')
append_param(rule, params['to_source'], '--to-source', False)
append_param(rule, params['goto'], '-g', False)
append_param(rule, params['in_interface'], '-i', False)
@@ -571,6 +621,13 @@ def construct_rule(params):
append_match(rule, params['src_range'] or params['dst_range'], 'iprange')
append_param(rule, params['src_range'], '--src-range', False)
append_param(rule, params['dst_range'], '--dst-range', False)
+ if 'set' in params['match']:
+ append_param(rule, params['match_set'], '--match-set', False)
+ append_match_flag(rule, 'match', params['match_set_flags'], False)
+ elif params['match_set']:
+ append_match(rule, params['match_set'], 'set')
+ append_param(rule, params['match_set'], '--match-set', False)
+ append_match_flag(rule, 'match', params['match_set_flags'], False)
append_match(rule, params['limit'] or params['limit_burst'], 'limit')
append_param(rule, params['limit'], '--limit', False)
append_param(rule, params['limit_burst'], '--limit-burst', False)
@@ -637,7 +694,7 @@ def set_chain_policy(iptables_path, module, params):
def get_chain_policy(iptables_path, module, params):
- cmd = push_arguments(iptables_path, '-L', params)
+ cmd = push_arguments(iptables_path, '-L', params, make_rule=False)
rc, out, _ = module.run_command(cmd, check_rc=True)
chain_header = out.split("\n")[0]
result = re.search(r'\(policy ([A-Z]+)\)', chain_header)
@@ -668,11 +725,11 @@ def main():
to_source=dict(type='str'),
destination=dict(type='str'),
to_destination=dict(type='str'),
- match=dict(type='list', default=[]),
+ match=dict(type='list', elements='str', default=[]),
tcp_flags=dict(type='dict',
options=dict(
- flags=dict(type='list'),
- flags_set=dict(type='list'))
+ flags=dict(type='list', elements='str'),
+ flags_set=dict(type='list', elements='str'))
),
jump=dict(type='str'),
gateway=dict(type='str'),
@@ -690,13 +747,16 @@ def main():
set_counters=dict(type='str'),
source_port=dict(type='str'),
destination_port=dict(type='str'),
+ destination_ports=dict(type='list', elements='str', default=[]),
to_ports=dict(type='str'),
set_dscp_mark=dict(type='str'),
set_dscp_mark_class=dict(type='str'),
comment=dict(type='str'),
- ctstate=dict(type='list', default=[]),
+ ctstate=dict(type='list', elements='str', default=[]),
src_range=dict(type='str'),
dst_range=dict(type='str'),
+ match_set=dict(type='str'),
+ match_set_flags=dict(type='str', choices=['src', 'dst', 'src,dst', 'dst,src']),
limit=dict(type='str'),
limit_burst=dict(type='str'),
uid_owner=dict(type='str'),
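
To make the new multiport handling in construct_rule() more concrete, here is a rough sketch of the arguments a destination_ports list produces; append_csv() below is a simplified stand-in for the module's helper, not the real implementation:

    def append_csv(rule, param, flag):
        # join list values with commas behind a single flag
        if param:
            rule.extend([flag, ','.join(param)])

    rule = ['-A', 'INPUT', '-p', 'tcp', '-m', 'multiport']
    append_csv(rule, ['80', '443', '8081:8083'], '--dports')
    rule += ['-j', 'ACCEPT']
    print(' '.join(rule))
    # -A INPUT -p tcp -m multiport --dports 80,443,8081:8083 -j ACCEPT
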
diff --git a/lib/ansible/modules/known_hosts.py b/lib/ansible/modules/known_hosts.py
index 221e24cc..8d4b226f 100644
--- a/lib/ansible/modules/known_hosts.py
+++ b/lib/ansible/modules/known_hosts.py
@@ -24,8 +24,8 @@ options:
- The host to add or remove (must match a host specified in key). It will be converted to lowercase so that ssh-keygen can find it.
- Must match with <hostname> or <ip> present in key attribute.
- For custom SSH port, C(name) needs to specify port as well. See example section.
- required: true
type: str
+ required: true
key:
description:
- The SSH public host key, as a string.
@@ -335,7 +335,7 @@ def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True, type='str', aliases=['host']),
- key=dict(required=False, type='str'),
+ key=dict(required=False, type='str', no_log=False),
path=dict(default="~/.ssh/known_hosts", type='path'),
hash_host=dict(required=False, type='bool', default=False),
state=dict(default='present', choices=['absent', 'present']),
diff --git a/lib/ansible/modules/lineinfile.py b/lib/ansible/modules/lineinfile.py
index 7b364499..1ba53484 100644
--- a/lib/ansible/modules/lineinfile.py
+++ b/lib/ansible/modules/lineinfile.py
@@ -44,6 +44,17 @@ options:
type: str
aliases: [ regex ]
version_added: '1.7'
+ search_string:
+ description:
+ - The literal string to look for in every line of the file. This does not have to match the entire line.
+ - For C(state=present), the line to replace if the string is found in the file. Only the last line found will be replaced.
+ - For C(state=absent), the line(s) to remove if the string is in the line.
+ - If the literal string is not found, the line will be
+ added to the file in keeping with C(insertbefore) or C(insertafter)
+ settings.
+ - Mutually exclusive with C(backrefs) and C(regexp).
+ type: str
+ version_added: '2.11'
state:
description:
- Whether the line should be there or not.
@@ -68,6 +79,7 @@ options:
does not match anywhere in the file, the file will be left unchanged.
- If the C(regexp) does match, the last matching line will be replaced by
the expanded line parameter.
+ - Mutually exclusive with C(search_string).
type: bool
default: no
version_added: "1.1"
@@ -136,6 +148,7 @@ seealso:
author:
- Daniel Hokka Zakrissoni (@dhozac)
- Ahti Kitsik (@ahtik)
+ - Jose Angel Munoz (@imjoseangel)
'''
EXAMPLES = r'''
@@ -161,6 +174,15 @@ EXAMPLES = r'''
group: root
mode: '0644'
+- name: Replace a localhost entry searching for a literal string to avoid escaping
+ lineinfile:
+ path: /etc/hosts
+ search_string: '127.0.0.1'
+ line: 127.0.0.1 localhost
+ owner: root
+ group: root
+ mode: '0644'
+
- name: Ensure the default Apache port is 8080
ansible.builtin.lineinfile:
path: /etc/httpd/conf/httpd.conf
@@ -168,6 +190,13 @@ EXAMPLES = r'''
insertafter: '^#Listen '
line: Listen 8080
+- name: Ensure php extension matches new pattern
+ lineinfile:
+ path: /etc/httpd/conf/httpd.conf
+ search_string: '<FilesMatch ".php[45]?$">'
+ insertafter: '^\t<Location \/>\n'
+ line: ' <FilesMatch ".php[34]?$">'
+
- name: Ensure we have our own comment added to /etc/services
ansible.builtin.lineinfile:
path: /etc/services
@@ -253,7 +282,7 @@ def check_file_attrs(module, changed, message, diff):
return message, changed
-def present(module, dest, regexp, line, insertafter, insertbefore, create,
+def present(module, dest, regexp, search_string, line, insertafter, insertbefore, create,
backup, backrefs, firstmatch):
diff = {'before': '',
@@ -301,8 +330,8 @@ def present(module, dest, regexp, line, insertafter, insertbefore, create,
# "If regular expressions are passed to both regexp and
# insertafter, insertafter is only honored if no match for regexp is found."
# Therefore:
- # 1. regexp was found -> ignore insertafter, replace the founded line
- # 2. regexp was not found -> insert the line after 'insertafter' or 'insertbefore' line
+ # 1. regexp or search_string was found -> ignore insertafter, replace the found line
+ # 2. regexp or search_string was not found -> insert the line after 'insertafter' or 'insertbefore' line
# Given the above:
# 1. First check that there is no match for regexp:
@@ -315,7 +344,17 @@ def present(module, dest, regexp, line, insertafter, insertbefore, create,
if firstmatch:
break
- # 2. When no match found on the previous step,
+ # 2. Second check that there is no match for search_string:
+ if search_string is not None:
+ for lineno, b_cur_line in enumerate(b_lines):
+ match_found = to_bytes(search_string, errors='surrogate_or_strict') in b_cur_line
+ if match_found:
+ index[0] = lineno
+ match = match_found
+ if firstmatch:
+ break
+
+ # 3. When no match found on the previous step,
# parse for searching insertafter/insertbefore:
if not match:
for lineno, b_cur_line in enumerate(b_lines):
@@ -350,9 +389,9 @@ def present(module, dest, regexp, line, insertafter, insertbefore, create,
if not b_new_line.endswith(b_linesep):
b_new_line += b_linesep
- # If no regexp was given and no line match is found anywhere in the file,
+ # If no regexp or search_string was given and no line match is found anywhere in the file,
# insert the line appropriately if using insertbefore or insertafter
- if regexp is None and match is None and not exact_line_match:
+ if (regexp, search_string, match) == (None, None, None) and not exact_line_match:
# Insert lines
if insertafter and insertafter != 'EOF':
@@ -428,7 +467,7 @@ def present(module, dest, regexp, line, insertafter, insertbefore, create,
msg = 'line added'
changed = True
- # insert matched, but not the regexp
+ # insert matched, but not the regexp or search_string
else:
b_lines.insert(index[1], b_line + b_linesep)
msg = 'line added'
@@ -456,7 +495,7 @@ def present(module, dest, regexp, line, insertafter, insertbefore, create,
module.exit_json(changed=changed, msg=msg, backup=backupdest, diff=difflist)
-def absent(module, dest, regexp, line, backup):
+def absent(module, dest, regexp, search_string, line, backup):
b_dest = to_bytes(dest, errors='surrogate_or_strict')
if not os.path.exists(b_dest):
@@ -483,6 +522,8 @@ def absent(module, dest, regexp, line, backup):
def matcher(b_cur_line):
if regexp is not None:
match_found = bre_c.search(b_cur_line)
+ elif search_string is not None:
+ match_found = to_bytes(search_string, errors='surrogate_or_strict') in b_cur_line
else:
match_found = b_line == b_cur_line.rstrip(b'\r\n')
if match_found:
@@ -521,6 +562,7 @@ def main():
path=dict(type='path', required=True, aliases=['dest', 'destfile', 'name']),
state=dict(type='str', default='present', choices=['absent', 'present']),
regexp=dict(type='str', aliases=['regex']),
+ search_string=dict(type='str'),
line=dict(type='str', aliases=['value']),
insertafter=dict(type='str'),
insertbefore=dict(type='str'),
@@ -530,7 +572,8 @@ def main():
firstmatch=dict(type='bool', default=False),
validate=dict(type='str'),
),
- mutually_exclusive=[['insertbefore', 'insertafter']],
+ mutually_exclusive=[
+ ['insertbefore', 'insertafter'], ['regexp', 'search_string'], ['backrefs', 'search_string']],
add_file_common_args=True,
supports_check_mode=True,
)
@@ -542,13 +585,17 @@ def main():
path = params['path']
firstmatch = params['firstmatch']
regexp = params['regexp']
+ search_string = params['search_string']
line = params['line']
- if regexp == '':
- module.warn(
- "The regular expression is an empty string, which will match every line in the file. "
- "This may have unintended consequences, such as replacing the last line in the file rather than appending. "
- "If this is desired, use '^' to match every line in the file and avoid this warning.")
+ if '' in [regexp, search_string]:
+ msg = ("The %s is an empty string, which will match every line in the file. "
+ "This may have unintended consequences, such as replacing the last line in the file rather than appending.")
+ param_name = 'search string'
+ if regexp == '':
+ param_name = 'regular expression'
+ msg += " If this is desired, use '^' to match every line in the file and avoid this warning."
+ module.warn(msg % param_name)
b_path = to_bytes(path, errors='surrogate_or_strict')
if os.path.isdir(b_path):
@@ -567,13 +614,13 @@ def main():
if ins_bef is None and ins_aft is None:
ins_aft = 'EOF'
- present(module, path, regexp, line,
+ present(module, path, regexp, search_string, line,
ins_aft, ins_bef, create, backup, backrefs, firstmatch)
else:
- if regexp is None and line is None:
- module.fail_json(msg='one of line or regexp is required with state=absent')
+ if (regexp, search_string, line) == (None, None, None):
+ module.fail_json(msg='one of line, search_string, or regexp is required with state=absent')
- absent(module, path, regexp, line, backup)
+ absent(module, path, regexp, search_string, line, backup)
if __name__ == '__main__':
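
The search_string handling added above is a plain byte-wise substring test rather than a regular expression search. A small sketch of the idea (Python 3 assumed; the sample lines are illustrative):

    search_string = '127.0.0.1'
    b_lines = [b'127.0.0.1 localhost\n', b'10.0.0.5 db01\n']
    matches = [lineno for lineno, b_cur_line in enumerate(b_lines)
               if search_string.encode() in b_cur_line]
    print(matches)  # [0]; with state=present the last matching line is the one replaced
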
diff --git a/lib/ansible/modules/package.py b/lib/ansible/modules/package.py
index f15ad5af..c8f03f06 100644
--- a/lib/ansible/modules/package.py
+++ b/lib/ansible/modules/package.py
@@ -17,8 +17,14 @@ author:
- Ansible Core Team
short_description: Generic OS package manager
description:
- - Installs, upgrade and removes packages using the underlying OS package manager.
- - For Windows targets, use the M(ansible.windows.win_package) module instead.
+ - This module manages packages on a target without specifying a package manager module (like M(ansible.builtin.yum), M(ansible.builtin.apt), ...).
+ It is convenient to use in a heterogeneous environment of machines without having to create a specific task for
+ each package manager. `package` calls behind the scenes the module for the package manager used by the operating system,
+ as discovered by the module M(ansible.builtin.setup). If `setup` was not yet run, `package` will run it.
+ - This module acts as a proxy to the underlying package manager module. While all arguments will be passed to the
+ underlying module, not all modules support the same arguments. This documentation only covers the minimum intersection
+ of module arguments that all packaging modules support.
+ - For Windows targets, use the M(ansible.windows.win_package) module instead.
options:
name:
description:
@@ -39,8 +45,8 @@ options:
requirements:
- Whatever is required for the package plugins specific for each system.
notes:
- - This module actually calls the pertinent package modules for each system (apt, yum, etc).
- - For Windows targets, use the M(ansible.windows.win_package) module instead.
+ - While `package` abstracts package managers to ease dealing with multiple distributions, the package name often differs for the same software.
+
'''
EXAMPLES = '''
- name: Install ntpdate
diff --git a/lib/ansible/modules/package_facts.py b/lib/ansible/modules/package_facts.py
index 97bfd6f0..202b7fad 100644
--- a/lib/ansible/modules/package_facts.py
+++ b/lib/ansible/modules/package_facts.py
@@ -19,10 +19,11 @@ options:
- The package manager used by the system so we can query the package information.
- Since 2.8 this is a list and can support multiple package managers per system.
- The 'portage' and 'pkg' options were added in version 2.8.
+ - The 'apk' option was added in version 2.11.
default: ['auto']
- choices: ['auto', 'rpm', 'apt', 'portage', 'pkg', 'pacman']
- required: False
+ choices: ['auto', 'rpm', 'apt', 'portage', 'pkg', 'pacman', 'apk']
type: list
+ elements: str
strategy:
description:
- This option controls how the module queries the package managers on the system.
@@ -30,6 +31,7 @@ options:
C(all) will return information for all supported and available package managers on the system.
choices: ['first', 'all']
default: 'first'
+ type: str
version_added: "2.8"
version_added: "2.5"
requirements:
@@ -210,6 +212,7 @@ import re
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.common.process import get_bin_path
+from ansible.module_utils.common.respawn import has_respawned, probe_interpreters_for_module, respawn_module
from ansible.module_utils.facts.packages import LibMgr, CLIMgr, get_all_pkg_managers
@@ -233,8 +236,19 @@ class RPM(LibMgr):
try:
get_bin_path('rpm')
+
+ if not we_have_lib and not has_respawned():
+ # try to locate an interpreter with the necessary lib
+ interpreters = ['/usr/libexec/platform-python',
+ '/usr/bin/python3',
+ '/usr/bin/python2']
+ interpreter_path = probe_interpreters_for_module(interpreters, self.LIB)
+ if interpreter_path:
+ respawn_module(interpreter_path)
+ # end of the line for this process; this module will exit when the respawned copy completes
+
if not we_have_lib:
- module.warn('Found "rpm" but %s' % (missing_required_lib('rpm')))
+ module.warn('Found "rpm" but %s' % (missing_required_lib(self.LIB)))
except ValueError:
pass
@@ -267,8 +281,18 @@ class APT(LibMgr):
except ValueError:
continue
else:
+ if not has_respawned():
+ # try to locate an interpreter with the necessary lib
+ interpreters = ['/usr/bin/python3',
+ '/usr/bin/python2']
+ interpreter_path = probe_interpreters_for_module(interpreters, self.LIB)
+ if interpreter_path:
+ respawn_module(interpreter_path)
+ # end of the line for this process; this module will exit here when the respawned copy completes
+
module.warn('Found "%s" but %s' % (exe, missing_required_lib('apt')))
break
+
return we_have_lib
def list_installed(self):
@@ -378,6 +402,29 @@ class PORTAGE(CLIMgr):
return dict(zip(self.atoms, package.split()))
+class APK(CLIMgr):
+
+ CLI = 'apk'
+
+ def list_installed(self):
+ rc, out, err = module.run_command([self._cli, 'info', '-v'])
+ if rc != 0 or err:
+ raise Exception("Unable to list packages rc=%s : %s" % (rc, err))
+ return out.splitlines()
+
+ def get_package_details(self, package):
+ raw_pkg_details = {'name': package, 'version': '', 'release': ''}
+ nvr = package.rsplit('-', 2)
+ try:
+ return {
+ 'name': nvr[0],
+ 'version': nvr[1],
+ 'release': nvr[2],
+ }
+ except IndexError:
+ return raw_pkg_details
+
+
def main():
# get supported pkg managers
@@ -386,7 +433,7 @@ def main():
# start work
global module
- module = AnsibleModule(argument_spec=dict(manager={'type': 'list', 'default': ['auto']},
+ module = AnsibleModule(argument_spec=dict(manager={'type': 'list', 'elements': 'str', 'default': ['auto']},
strategy={'choices': ['first', 'all'], 'default': 'first'}),
supports_check_mode=True)
packages = {}
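
The new APK manager splits an 'apk info -v' entry into name, version and release with a right-hand split, falling back to the raw name when the split does not yield three parts. A quick sketch:

    package = 'busybox-1.32.1-r6'   # sample output line from 'apk info -v'
    name, version, release = package.rsplit('-', 2)
    print(name, version, release)   # busybox 1.32.1 r6
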
diff --git a/lib/ansible/modules/pip.py b/lib/ansible/modules/pip.py
index 97ae7d90..420e3a18 100644
--- a/lib/ansible/modules/pip.py
+++ b/lib/ansible/modules/pip.py
@@ -22,6 +22,7 @@ options:
- The name of a Python library to install or the url(bzr+,hg+,git+,svn+) of the remote package.
- This can be a list (since 2.2) and contain version specifiers (since 2.7).
type: list
+ elements: str
version:
description:
- The version number to install of the Python library specified in the I(name) parameter.
diff --git a/lib/ansible/modules/reboot.py b/lib/ansible/modules/reboot.py
index c5066560..28378ae5 100644
--- a/lib/ansible/modules/reboot.py
+++ b/lib/ansible/modules/reboot.py
@@ -60,7 +60,7 @@ options:
- Paths to search on the remote machine for the C(shutdown) command.
- I(Only) these paths will be searched for the C(shutdown) command. C(PATH) is ignored in the remote node when searching for the C(shutdown) command.
type: list
- default: ['/sbin', '/usr/sbin', '/usr/local/sbin']
+ default: ['/sbin', '/bin', '/usr/sbin', '/usr/bin', '/usr/local/sbin']
version_added: '2.8'
boot_time_command:
@@ -70,6 +70,16 @@ options:
type: str
default: 'cat /proc/sys/kernel/random/boot_id'
version_added: '2.10'
+
+ reboot_command:
+ description:
+ - Command to run that reboots the system, including any parameters passed to the command.
+ - Can be an absolute path to the command or just the command name. If an absolute path to the
+ command is not given, C(search_paths) on the target system will be searched to find the absolute path.
+ - This will cause C(pre_reboot_delay), C(post_reboot_delay), and C(msg) to be ignored.
+ type: str
+ default: '[determined based on target OS]'
+ version_added: '2.11'
seealso:
- module: ansible.windows.win_reboot
author:
@@ -89,6 +99,12 @@ EXAMPLES = r'''
reboot:
search_paths:
- '/lib/molly-guard'
+
+- name: Reboot machine using a custom reboot command
+ reboot:
+ reboot_command: launchctl reboot userspace
+ boot_time_command: uptime | cut -d ' ' -f 5
+
'''
RETURN = r'''
diff --git a/lib/ansible/modules/replace.py b/lib/ansible/modules/replace.py
index ed9fd1f2..09600f61 100644
--- a/lib/ansible/modules/replace.py
+++ b/lib/ansible/modules/replace.py
@@ -171,6 +171,7 @@ RETURN = r'''#'''
import os
import re
import tempfile
+from traceback import format_exc
from ansible.module_utils._text import to_text, to_bytes
from ansible.module_utils.basic import AnsibleModule
@@ -242,9 +243,12 @@ def main():
if not os.path.exists(path):
module.fail_json(rc=257, msg='Path %s does not exist !' % path)
else:
- f = open(path, 'rb')
- contents = to_text(f.read(), errors='surrogate_or_strict', encoding=encoding)
- f.close()
+ try:
+ with open(path, 'rb') as f:
+ contents = to_text(f.read(), errors='surrogate_or_strict', encoding=encoding)
+ except (OSError, IOError) as e:
+ module.fail_json(msg='Unable to read the contents of %s: %s' % (path, to_text(e)),
+ exception=format_exc())
pattern = u''
if params['after'] and params['before']:
diff --git a/lib/ansible/modules/rpm_key.py b/lib/ansible/modules/rpm_key.py
index c24d1ce1..0ab186dc 100644
--- a/lib/ansible/modules/rpm_key.py
+++ b/lib/ansible/modules/rpm_key.py
@@ -24,10 +24,12 @@ options:
description:
- Key that will be modified. Can be a url, a file on the managed node, or a keyid if the key
already exists in the database.
+ type: str
required: true
state:
description:
- If the key will be imported or removed from the rpm db.
+ type: str
default: present
choices: [ absent, present ]
validate_certs:
@@ -231,7 +233,7 @@ def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='present', choices=['absent', 'present']),
- key=dict(type='str', required=True),
+ key=dict(type='str', required=True, no_log=False),
fingerprint=dict(type='str'),
validate_certs=dict(type='bool', default=True),
),
diff --git a/lib/ansible/modules/service.py b/lib/ansible/modules/service.py
index f4eb7097..056183be 100644
--- a/lib/ansible/modules/service.py
+++ b/lib/ansible/modules/service.py
@@ -135,7 +135,6 @@ import platform
import re
import select
import shlex
-import string
import subprocess
import tempfile
import time
@@ -419,7 +418,7 @@ class Service(object):
# Write out the contents of the list into our temporary file.
for rcline in new_rc_conf:
- os.write(TMP_RCCONF, rcline)
+ os.write(TMP_RCCONF, rcline.encode())
# Close temporary file.
os.close(TMP_RCCONF)
@@ -1116,7 +1115,7 @@ class DragonFlyBsdService(FreeBsdService):
if os.path.isfile(rcfile):
self.rcconf_file = rcfile
- self.rcconf_key = "%s" % string.replace(self.name, "-", "_")
+ self.rcconf_key = "%s" % self.name.replace("-", "_")
return self.service_enable_rcconf()
@@ -1274,7 +1273,7 @@ class NetBsdService(Service):
"""
This is the NetBSD Service manipulation class - it uses the /etc/rc.conf
file for controlling services started at boot, check status and perform
- direct service manipulation. Init scripts in /etc/rcd are used for
+ direct service manipulation. Init scripts in /etc/rc.d are used for
controlling services (start/stop) as well as for controlling the current
state.
"""
@@ -1304,7 +1303,7 @@ class NetBsdService(Service):
if os.path.isfile(rcfile):
self.rcconf_file = rcfile
- self.rcconf_key = "%s" % string.replace(self.name, "-", "_")
+ self.rcconf_key = "%s" % self.name.replace("-", "_")
return self.service_enable_rcconf()
@@ -1356,8 +1355,8 @@ class SunOSService(Service):
# Support for synchronous restart/refresh is only supported on
# Oracle Solaris >= 11.2
for line in open('/etc/release', 'r').readlines():
- m = re.match(r'\s+Oracle Solaris (\d+\.\d+).*', line.rstrip())
- if m and m.groups()[0] >= 11.2:
+ m = re.match(r'\s+Oracle Solaris (\d+)\.(\d+).*', line.rstrip())
+ if m and m.groups() >= ('11', '2'):
return True
def get_service_status(self):
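
The Solaris change above stops comparing a captured string against the float 11.2 and instead compares the regex groups as a tuple of strings. A rough illustration (the release line is an assumed example):

    import re

    line = '                       Oracle Solaris 11.4 SPARC'
    m = re.match(r'\s+Oracle Solaris (\d+)\.(\d+).*', line.rstrip())
    print(m.groups())                  # ('11', '4')
    print(m.groups() >= ('11', '2'))   # True -> synchronous restart supported
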
diff --git a/lib/ansible/modules/service_facts.py b/lib/ansible/modules/service_facts.py
index 91de089c..cec49324 100644
--- a/lib/ansible/modules/service_facts.py
+++ b/lib/ansible/modules/service_facts.py
@@ -16,7 +16,7 @@ short_description: Return service state information as fact data
description:
- Return service state information as fact data for various service management utilities.
version_added: "2.5"
-requirements: ["Any of the following supported init systems: systemd, sysv, upstart"]
+requirements: ["Any of the following supported init systems: systemd, sysv, upstart, AIX SRC"]
notes:
- When accessing the C(ansible_facts.services) facts collected by this module,
@@ -25,6 +25,7 @@ notes:
C(ansible_facts.services.zuul-gateway). It is instead recommended to
using the string value of the service name as the key in order to obtain
the fact data value like C(ansible_facts.services['zuul-gateway'])
+ - AIX SRC was added in version 2.11.
- Supports C(check_mode).
author:
@@ -54,14 +55,14 @@ ansible_facts:
source:
description:
- Init system of the service.
- - One of C(systemd), C(sysv), C(upstart).
+ - One of C(rcctl), C(systemd), C(sysv), C(upstart), C(src).
returned: always
type: str
sample: sysv
state:
description:
- State of the service.
- - Either C(running), C(stopped), or C(unknown).
+ - Either C(failed), C(running), C(stopped), or C(unknown).
returned: always
type: str
sample: running
@@ -69,7 +70,7 @@ ansible_facts:
description:
- State of the service.
- Either C(enabled), C(disabled), C(static), C(indirect) or C(unknown).
- returned: systemd systems or RedHat/SUSE flavored sysvinit/upstart
+ returned: systemd systems or RedHat/SUSE flavored sysvinit/upstart or OpenBSD
type: str
sample: enabled
name:
@@ -80,6 +81,7 @@ ansible_facts:
'''
+import platform
import re
from ansible.module_utils.basic import AnsibleModule
@@ -231,10 +233,82 @@ class SystemctlScanService(BaseService):
return services
+class AIXScanService(BaseService):
+
+ def gather_services(self):
+ services = {}
+ if platform.system() != 'AIX':
+ return None
+ lssrc_path = self.module.get_bin_path("lssrc")
+ if lssrc_path is None:
+ return None
+ rc, stdout, stderr = self.module.run_command("%s -a" % lssrc_path)
+ for line in stdout.split('\n'):
+ line_data = line.split()
+ if len(line_data) < 2:
+ continue # Skipping because we expected more data
+ if line_data[0] == "Subsystem":
+ continue # Skip header
+ service_name = line_data[0]
+ if line_data[-1] == "active":
+ service_state = "running"
+ elif line_data[-1] == "inoperative":
+ service_state = "stopped"
+ else:
+ service_state = "unknown"
+ services[service_name] = {"name": service_name, "state": service_state, "source": "src"}
+ return services
+
+
+class OpenBSDScanService(BaseService):
+ def query_rcctl(self, cmd):
+ svcs = []
+
+ rc, stdout, stderr = self.module.run_command("%s ls %s" % (self.rcctl_path, cmd))
+ if 'needs root privileges' in stderr.lower():
+ self.incomplete_warning = True
+ return []
+
+ for svc in stdout.split('\n'):
+ if svc == '':
+ continue
+ else:
+ svcs.append(svc)
+
+ return svcs
+
+ def gather_services(self):
+ services = {}
+ self.rcctl_path = self.module.get_bin_path("rcctl")
+ if self.rcctl_path is None:
+ return None
+
+ for svc in self.query_rcctl('all'):
+ services[svc] = {'name': svc, 'source': 'rcctl'}
+
+ for svc in self.query_rcctl('on'):
+ services[svc].update({'status': 'enabled'})
+
+ for svc in self.query_rcctl('started'):
+ services[svc].update({'state': 'running'})
+
+ # Based on the list of services that are enabled, determine which are disabled
+ [services[svc].update({'status': 'disabled'}) for svc in services if services[svc].get('status') is None]
+
+ # and do the same for those that aren't running
+ [services[svc].update({'state': 'stopped'}) for svc in services if services[svc].get('state') is None]
+
+ # Override the state for services which are marked as 'failed'
+ for svc in self.query_rcctl('failed'):
+ services[svc].update({'state': 'failed'})
+
+ return services
+
+
def main():
module = AnsibleModule(argument_spec=dict(), supports_check_mode=True)
module.run_command_environ_update = dict(LANG="C", LC_ALL="C")
- service_modules = (ServiceScanService, SystemctlScanService)
+ service_modules = (ServiceScanService, SystemctlScanService, AIXScanService, OpenBSDScanService)
all_services = {}
incomplete_warning = False
for svc_module in service_modules:
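
The OpenBSD scanner above first records which services are enabled and started, then back-fills 'disabled' and 'stopped' for everything else. A simplified sketch of that defaulting step (sample data assumed; the module itself does it with list comprehensions over rcctl output):

    services = {
        'ntpd': {'name': 'ntpd', 'source': 'rcctl', 'status': 'enabled', 'state': 'running'},
        'smtpd': {'name': 'smtpd', 'source': 'rcctl'},
    }
    for svc in services:
        services[svc].setdefault('status', 'disabled')
        services[svc].setdefault('state', 'stopped')
    print(services['smtpd'])
    # {'name': 'smtpd', 'source': 'rcctl', 'status': 'disabled', 'state': 'stopped'}
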
diff --git a/lib/ansible/modules/set_fact.py b/lib/ansible/modules/set_fact.py
index 0f5300d0..fe8eadf1 100644
--- a/lib/ansible/modules/set_fact.py
+++ b/lib/ansible/modules/set_fact.py
@@ -11,22 +11,20 @@ __metaclass__ = type
DOCUMENTATION = r'''
---
module: set_fact
-short_description: Set host facts from a task
+short_description: Set host variable(s) and fact(s).
version_added: "1.2"
description:
- - This module allows setting new variables.
- - Variables are set on a host-by-host basis just like facts discovered by the setup module.
- - These variables will be available to subsequent plays during an ansible-playbook run.
- - Set C(cacheable) to C(yes) to save variables across executions
- using a fact cache. Variables created with set_fact have different precedence depending on whether they are or are not cached.
- - Per the standard Ansible variable precedence rules, many other types of variables have a higher priority, so this value may be overridden.
- - This module is also supported for Windows targets.
+ - This action allows setting variables associated with the current host.
+ - These variables will be available to subsequent plays during an ansible-playbook run via the host they were set on.
+ - Set C(cacheable) to C(yes) to save variables across executions using a fact cache.
+ Variables will keep the set_fact precedence for the current run, but will use 'cached fact' precedence for subsequent ones.
+ - Per the standard Ansible variable precedence rules, other types of variables have a higher priority, so this value may be overridden.
options:
key_value:
description:
- - The C(set_fact) module takes key=value pairs as variables to set
- in the playbook scope. Or alternatively, accepts complex arguments
- using the C(args:) statement.
+ - "The C(set_fact) module takes ``key=value`` pairs or ``key: value``(YAML notation) as variables to set in the playbook scope.
+ The 'key' is the resulting variable name and the value is, of course, the value of said variable."
+ - You can create multiple variables at once by supplying multiple pairs, but do NOT mix notations.
required: true
cacheable:
description:
@@ -41,10 +39,15 @@ options:
default: no
version_added: "2.4"
notes:
- - "The C(var=value) notation can only create strings or booleans.
- If you want to create lists/arrays or dictionary/hashes use C(var: [val1, val2])."
- - Since 'cacheable' is now a module param, 'cacheable' is no longer a valid fact name as of Ansible 2.4.
- - This module is also supported for Windows targets.
+ - Because of the nature of tasks, set_fact will produce 'static' values for a variable.
+ Unlike normal 'lazy' variables, the value gets evaluated and templated on assignment.
+ - Some boolean values (yes, no, true, false) will always be converted to boolean type,
+ unless C(DEFAULT_JINJA2_NATIVE) is enabled. This is done so that C(var=value) can create booleans;
+ otherwise it would only be able to create strings, but it also prevents using those values to create YAML strings.
+ Using the setting will restrict k=v to strings, but will allow you to specify string or boolean in YAML.
+ - "To create lists/arrays or dictionary/hashes use YAML notation C(var: [val1, val2])."
+ - Since 'cacheable' is now a module param, 'cacheable' is no longer a valid fact name.
+ - This action does not use a connection and always executes on the controller.
seealso:
- module: ansible.builtin.include_vars
- ref: ansible_variable_precedence
@@ -54,7 +57,7 @@ author:
'''
EXAMPLES = r'''
-- name: Setting host facts using key=value pairs, note that this always creates strings or booleans
+- name: Setting host facts using key=value pairs, this format can only create strings or booleans
set_fact: one_fact="something" other_fact="{{ local_var }}"
- name: Setting host facts using complex arguments
@@ -69,12 +72,18 @@ EXAMPLES = r'''
other_fact: "{{ local_var * 2 }}"
cacheable: yes
-# As of Ansible 1.8, Ansible will convert boolean strings ('true', 'false', 'yes', 'no')
-# to proper boolean values when using the key=value syntax, however it is still
-# recommended that booleans be set using the complex argument style:
-- name: Setting booleans using complex argument style
+- name: Creating list and dictionary variables
set_fact:
- one_fact: yes
- other_fact: no
+ one_dict:
+ something: here
+ other: there
+ one_list:
+ - a
+ - b
+ - c
+- name: Creating list and dictionary variables using 'shorthand' YAML
+ set_fact:
+ two_dict: {'something': here2, 'other': somewhere}
+ two_list: [1,2,3]
'''
diff --git a/lib/ansible/modules/set_stats.py b/lib/ansible/modules/set_stats.py
index 65ae54a7..95f17c96 100644
--- a/lib/ansible/modules/set_stats.py
+++ b/lib/ansible/modules/set_stats.py
@@ -11,7 +11,7 @@ __metaclass__ = type
DOCUMENTATION = r'''
---
module: set_stats
-short_description: Set stats for the current ansible run
+short_description: Define and display stats for the current ansible run
description:
- This module allows setting/accumulating stats on the current ansible run, either per host or for all hosts in the run.
- This module is also supported for Windows targets.
@@ -33,29 +33,29 @@ options:
type: bool
default: yes
notes:
- - In order for custom stats to be displayed, you must set C(show_custom_stats) in C(ansible.cfg) or C(ANSIBLE_SHOW_CUSTOM_STATS) to C(yes).
+ - In order for custom stats to be displayed, you must set C(show_custom_stats) in the C([defaults]) section of C(ansible.cfg)
+ or set the environment variable C(ANSIBLE_SHOW_CUSTOM_STATS) to C(yes).
- This module is also supported for Windows targets.
version_added: "2.3"
'''
EXAMPLES = r'''
- name: Aggregating packages_installed stat per host
- set_stats:
+ ansible.builtin.set_stats:
data:
packages_installed: 31
per_host: yes
- name: Aggregating random stats for all hosts using complex arguments
- set_stats:
+ ansible.builtin.set_stats:
data:
one_stat: 11
other_stat: "{{ local_var * 2 }}"
another_stat: "{{ some_registered_var.results | map(attribute='ansible_facts.some_fact') | list }}"
per_host: no
-
- name: Setting stats (not aggregating)
- set_stats:
+ ansible.builtin.set_stats:
data:
the_answer: 42
aggregate: no
diff --git a/lib/ansible/modules/setup.py b/lib/ansible/modules/setup.py
index f2040dfc..13a7fd64 100644
--- a/lib/ansible/modules/setup.py
+++ b/lib/ansible/modules/setup.py
@@ -28,17 +28,27 @@ options:
use C(!all,!min), and specify the particular fact subsets.
Use the filter parameter if you do not want to display some collected
facts."
+ type: list
+ elements: str
default: "all"
gather_timeout:
version_added: "2.2"
description:
- Set the default timeout in seconds for individual fact gathering.
+ type: int
default: 10
filter:
version_added: "1.1"
description:
- - If supplied, only return facts that match this shell-style (fnmatch) wildcard.
- default: "*"
+ - If supplied, only return facts that match one of the shell-style
+ (fnmatch) patterns. An empty list basically means 'no filter'.
+ As of Ansible 2.11, the type has changed from string to list
+ and the default has become an empty list. A simple string is
+ still accepted and works as a single pattern, so the behaviour
+ prior to Ansible 2.11 is preserved.
+ type: list
+ elements: str
+ default: []
fact_path:
version_added: "1.3"
description:
@@ -54,6 +64,7 @@ options:
exists on the target host. Files in this path MUST be PowerShell scripts C(.ps1)
which outputs an object. This object will be formatted by Ansible as json so the
script should be outputting a raw hashtable, array, or other primitive object.
+ type: path
default: /etc/ansible/facts.d
description:
- This module is automatically called by playbooks to gather useful
@@ -76,6 +87,8 @@ notes:
- This module is also supported for Windows targets.
- This module should be run with elevated privileges on BSD systems to gather facts like ansible_product_version.
- Supports C(check_mode).
+ - For more information about delegated facts,
+ please check U(https://docs.ansible.com/ansible/latest/user_guide/playbooks_delegation.html#delegating-facts).
author:
- "Ansible Core Team"
- "Michael DeHaan"
@@ -101,6 +114,13 @@ EXAMPLES = """
- '!any'
- facter
+- name: Collect only selected facts
+ ansible.builtin.setup:
+ filter:
+ - 'ansible_distribution'
+ - 'ansible_machine_id'
+ - 'ansible_*_mb'
+
# Display only facts about certain interfaces.
# ansible all -m ansible.builtin.setup -a 'filter=ansible_eth[0-2]'
@@ -121,23 +141,32 @@ EXAMPLES = """
# Display facts from Windows hosts with custom facts stored in C(C:\\custom_facts).
# ansible windows -m ansible.builtin.setup -a "fact_path='c:\\custom_facts'"
+
+# Gathers facts for the machines in the dbservers group (a.k.a Delegating facts)
+- hosts: app_servers
+ tasks:
+ - name: Gather facts from db servers
+ ansible.builtin.setup:
+ delegate_to: "{{ item }}"
+ delegate_facts: true
+ loop: "{{ groups['dbservers'] }}"
"""
# import module snippets
from ..module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_text
+from ansible.module_utils.facts import ansible_collector, default_collectors
+from ansible.module_utils.facts.collector import CollectorNotFoundError, CycleFoundInFactDeps, UnresolvedFactDep
from ansible.module_utils.facts.namespace import PrefixFactNamespace
-from ansible.module_utils.facts import ansible_collector
-
-from ansible.module_utils.facts import default_collectors
def main():
module = AnsibleModule(
argument_spec=dict(
- gather_subset=dict(default=["all"], required=False, type='list'),
+ gather_subset=dict(default=["all"], required=False, type='list', elements='str'),
gather_timeout=dict(default=10, required=False, type='int'),
- filter=dict(default="*", required=False),
+ filter=dict(default=[], required=False, type='list', elements='str'),
fact_path=dict(default='/etc/ansible/facts.d', required=False, type='path'),
),
supports_check_mode=True,
@@ -162,13 +191,16 @@ def main():
namespace = PrefixFactNamespace(namespace_name='ansible',
prefix='ansible_')
- fact_collector = \
- ansible_collector.get_ansible_collector(all_collector_classes=all_collector_classes,
- namespace=namespace,
- filter_spec=filter_spec,
- gather_subset=gather_subset,
- gather_timeout=gather_timeout,
- minimal_gather_subset=minimal_gather_subset)
+ try:
+ fact_collector = ansible_collector.get_ansible_collector(all_collector_classes=all_collector_classes,
+ namespace=namespace,
+ filter_spec=filter_spec,
+ gather_subset=gather_subset,
+ gather_timeout=gather_timeout,
+ minimal_gather_subset=minimal_gather_subset)
+ except (TypeError, CollectorNotFoundError, CycleFoundInFactDeps, UnresolvedFactDep) as e:
+ # bad subset given, unknown collector, or fact deps declared but not found
+ module.fail_json(msg=to_text(e))
facts_dict = fact_collector.collect(module=module)
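
The filter option now takes a list of fnmatch patterns instead of a single string. A small sketch of the matching semantics described in the documentation hunk above (fact names are illustrative):

    import fnmatch

    facts = ['ansible_distribution', 'ansible_machine_id', 'ansible_memtotal_mb', 'ansible_fqdn']
    patterns = ['ansible_distribution', 'ansible_*_mb']
    print([f for f in facts if any(fnmatch.fnmatch(f, p) for p in patterns)])
    # ['ansible_distribution', 'ansible_memtotal_mb']
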
diff --git a/lib/ansible/modules/slurp.py b/lib/ansible/modules/slurp.py
index 919c79cc..0b9d8b6e 100644
--- a/lib/ansible/modules/slurp.py
+++ b/lib/ansible/modules/slurp.py
@@ -58,12 +58,30 @@ EXAMPLES = r'''
# 2179
'''
-RETURN = r'''#'''
+RETURN = r'''
+content:
+ description: Encoded file content
+ returned: success
+ type: str
+ sample: "MjE3OQo="
+encoding:
+ description: Type of encoding used for file
+ returned: success
+ type: str
+ sample: "base64"
+source:
+ description: Actual path of file slurped
+ returned: success
+ type: str
+ sample: "/var/run/sshd.pid"
+'''
import base64
+import errno
import os
from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
def main():
@@ -75,13 +93,21 @@ def main():
)
source = module.params['src']
- if not os.path.exists(source):
- module.fail_json(msg="file not found: %s" % source)
- if not os.access(source, os.R_OK):
- module.fail_json(msg="file is not readable: %s" % source)
+ try:
+ with open(source, 'rb') as source_fh:
+ source_content = source_fh.read()
+ except (IOError, OSError) as e:
+ if e.errno == errno.ENOENT:
+ msg = "file not found: %s" % source
+ elif e.errno == errno.EACCES:
+ msg = "file is not readable: %s" % source
+ elif e.errno == errno.EISDIR:
+ msg = "source is a directory and must be a file: %s" % source
+ else:
+ msg = "unable to slurp file: %s" % to_native(e, errors='surrogate_then_replace')
+
+ module.fail_json(msg)
- with open(source, 'rb') as source_fh:
- source_content = source_fh.read()
data = base64.b64encode(source_content)
module.exit_json(content=data, source=source, encoding='base64')
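
The content value documented in the new RETURN block is base64-encoded; decoding it on the controller side is a one-liner, shown here with the sample value from the documentation above:

    import base64

    content = 'MjE3OQo='
    print(base64.b64decode(content).decode())  # 2179
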
diff --git a/lib/ansible/modules/stat.py b/lib/ansible/modules/stat.py
index 717a8ac2..d474aac3 100644
--- a/lib/ansible/modules/stat.py
+++ b/lib/ansible/modules/stat.py
@@ -302,14 +302,14 @@ stat:
sample: ../foobar/21102015-1445431274-908472971
version_added: 2.4
md5:
- description: md5 hash of the path; this will be removed in Ansible 2.9 in
+ description: md5 hash of the file; this will be removed in Ansible 2.9 in
favor of the checksum return value
returned: success, path exists and user can read stats and path
supports hashing and md5 is supported
type: str
sample: f88fa92d8cf2eeecf4c0a50ccc96d0c0
checksum:
- description: hash of the path
+ description: hash of the file
returned: success, path exists, user can read stats, path supports
hashing and supplied checksum algorithm is available
type: str
@@ -521,11 +521,11 @@ def main():
output['mimetype'] = output['charset'] = 'unknown'
mimecmd = module.get_bin_path('file')
if mimecmd:
- mimecmd = [mimecmd, '-i', b_path]
+ mimecmd = [mimecmd, '--mime-type', '--mime-encoding', b_path]
try:
rc, out, err = module.run_command(mimecmd)
if rc == 0:
- mimetype, charset = out.split(':')[1].split(';')
+ mimetype, charset = out.rsplit(':', 1)[1].split(';')
output['mimetype'] = mimetype.strip()
output['charset'] = charset.split('=')[1].strip()
except Exception:
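
The switch to rsplit(':', 1) means the mimetype/charset parsing now keys off the last colon in the output of file, so a path that itself contains ':' no longer throws the split off. A quick sketch (the output line is an assumed example):

    out = '/tmp/report:2021.txt: text/plain; charset=us-ascii'
    mimetype, charset = out.rsplit(':', 1)[1].split(';')
    print(mimetype.strip())                # text/plain
    print(charset.split('=')[1].strip())   # us-ascii
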
diff --git a/lib/ansible/modules/subversion.py b/lib/ansible/modules/subversion.py
index 4a5e1dae..c62146af 100644
--- a/lib/ansible/modules/subversion.py
+++ b/lib/ansible/modules/subversion.py
@@ -24,17 +24,20 @@ options:
repo:
description:
- The subversion URL to the repository.
+ type: str
required: true
aliases: [ name, repository ]
dest:
description:
- Absolute path where the repository should be deployed.
- required: true
+ - The destination directory must be specified unless I(checkout=no), I(update=no), and I(export=no).
+ type: path
revision:
description:
- Specific revision to checkout.
+ type: str
default: HEAD
- aliases: [ version ]
+ aliases: [ rev, version ]
force:
description:
- If C(yes), modified files will be discarded. If C(no), module will fail if it encounters modified files.
@@ -51,15 +54,18 @@ options:
username:
description:
- C(--username) parameter passed to svn.
+ type: str
password:
description:
- C(--password) parameter passed to svn when svn is less than version 1.10.0. This is not secure and
the password will be leaked to argv.
- C(--password-from-stdin) parameter when svn is greater or equal to version 1.10.0.
+ type: str
executable:
description:
- Path to svn executable to use. If not supplied,
the normal mechanism for resolving binary paths will be used.
+ type: path
version_added: "1.4"
checkout:
description:
@@ -85,6 +91,13 @@ options:
default: "yes"
version_added: "2.0"
type: bool
+ validate_certs:
+ description:
+ - If C(no), passes the C(--trust-server-cert) flag to svn.
+ - If C(yes), does not pass the flag.
+ default: "no"
+ version_added: "2.11"
+ type: bool
requirements:
- subversion (the command line tool with C(svn) entrypoint)
@@ -121,7 +134,14 @@ from ansible.module_utils.basic import AnsibleModule
class Subversion(object):
- def __init__(self, module, dest, repo, revision, username, password, svn_path):
+
+ # Example text matched by the regexp:
+ # Révision : 1889134
+ # 版本: 1889134
+ # Revision: 1889134
+ REVISION_RE = r'^\w+\s?:\s+\d+$'
+
+ def __init__(self, module, dest, repo, revision, username, password, svn_path, validate_certs):
self.module = module
self.dest = dest
self.repo = repo
@@ -129,6 +149,7 @@ class Subversion(object):
self.username = username
self.password = password
self.svn_path = svn_path
+ self.validate_certs = validate_certs
def has_option_password_from_stdin(self):
rc, version, err = self.module.run_command([self.svn_path, '--version', '--quiet'], check_rc=True)
@@ -139,9 +160,10 @@ class Subversion(object):
bits = [
self.svn_path,
'--non-interactive',
- '--trust-server-cert',
'--no-auth-cache',
]
+ if not self.validate_certs:
+ bits.append('--trust-server-cert')
stdin_data = None
if self.username:
bits.extend(["--username", self.username])
@@ -213,14 +235,28 @@ class Subversion(object):
def get_revision(self):
'''Revision and URL of subversion working directory.'''
text = '\n'.join(self._exec(["info", self.dest]))
- rev = re.search(r'^Revision:.*$', text, re.MULTILINE).group(0)
- url = re.search(r'^URL:.*$', text, re.MULTILINE).group(0)
+ rev = re.search(self.REVISION_RE, text, re.MULTILINE)
+ if rev:
+ rev = rev.group(0)
+ else:
+ rev = 'Unable to get revision'
+
+ url = re.search(r'^URL\s?:.*$', text, re.MULTILINE)
+ if url:
+ url = url.group(0)
+ else:
+ url = 'Unable to get URL'
+
return rev, url
def get_remote_revision(self):
'''Revision and URL of subversion working directory.'''
text = '\n'.join(self._exec(["info", self.repo]))
- rev = re.search(r'^Revision:.*$', text, re.MULTILINE).group(0)
+ rev = re.search(self.REVISION_RE, text, re.MULTILINE)
+ if rev:
+ rev = rev.group(0)
+ else:
+ rev = 'Unable to get remote revision'
return rev
def has_local_mods(self):
@@ -235,7 +271,11 @@ class Subversion(object):
def needs_update(self):
curr, url = self.get_revision()
out2 = '\n'.join(self._exec(["info", "-r", self.revision, self.dest]))
- head = re.search(r'^Revision:.*$', out2, re.MULTILINE).group(0)
+ head = re.search(self.REVISION_RE, out2, re.MULTILINE)
+ if head:
+ head = head.group(0)
+ else:
+ head = 'Unable to get revision'
rev1 = int(curr.split(':')[1].strip())
rev2 = int(head.split(':')[1].strip())
change = False
@@ -259,6 +299,7 @@ def main():
update=dict(type='bool', default=True),
switch=dict(type='bool', default=True),
in_place=dict(type='bool', default=False),
+ validate_certs=dict(type='bool', default=False),
),
supports_check_mode=True,
)
@@ -275,6 +316,7 @@ def main():
checkout = module.params['checkout']
update = module.params['update']
in_place = module.params['in_place']
+ validate_certs = module.params['validate_certs']
# We screenscrape a huge amount of svn commands so use C locale anytime we
# call run_command()
@@ -283,7 +325,7 @@ def main():
if not dest and (checkout or update or export):
module.fail_json(msg="the destination directory must be specified unless checkout=no, update=no, and export=no")
- svn = Subversion(module, dest, repo, revision, username, password, svn_path)
+ svn = Subversion(module, dest, repo, revision, username, password, svn_path, validate_certs)
if not export and not update and not checkout:
module.exit_json(changed=False, after=svn.get_remote_revision())
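A minimal check (Python 3) that the locale-tolerant REVISION_RE really matches the localized `svn info` lines quoted in the class comment above; the sample lines are taken from that comment.

    import re

    REVISION_RE = r'^\w+\s?:\s+\d+$'
    for line in ("Révision : 1889134", "Revision: 1889134", "版本: 1889134"):
        # \w matches the localized label, \s? tolerates the optional space
        # some locales put before the colon.
        print(bool(re.search(REVISION_RE, line)))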
diff --git a/lib/ansible/modules/systemd.py b/lib/ansible/modules/systemd.py
index 26e79a54..eda8d05b 100644
--- a/lib/ansible/modules/systemd.py
+++ b/lib/ansible/modules/systemd.py
@@ -13,23 +13,26 @@ module: systemd
author:
- Ansible Core Team
version_added: "2.2"
-short_description: Manage services
+short_description: Manage systemd units
description:
- - Controls systemd services on remote hosts.
+ - Controls systemd units (services, timers, and so on) on remote hosts.
options:
name:
description:
- - Name of the service. This parameter takes the name of exactly one service to work with.
- - When using in a chroot environment you always need to specify the full name i.e. (crond.service).
+ - Name of the unit. This parameter takes the name of exactly one unit to work with.
+ - When no extension is given, a C(.service) suffix is implied, matching systemd's own behaviour.
+ - When using in a chroot environment you always need to specify the name of the unit with the extension. For example, C(crond.service).
+ type: str
aliases: [ service, unit ]
state:
description:
- C(started)/C(stopped) are idempotent actions that will not run commands unless necessary.
- C(restarted) will always bounce the service. C(reloaded) will always reload.
+ C(restarted) will always bounce the unit. C(reloaded) will always reload.
+ type: str
choices: [ reloaded, restarted, started, stopped ]
enabled:
description:
- - Whether the service should start on boot. B(At least one of state and enabled are required.)
+ - Whether the unit should start on boot. B(At least one of state and enabled are required.)
type: bool
force:
description:
@@ -43,7 +46,7 @@ options:
daemon_reload:
description:
- Run daemon-reload before doing any other operations, to make sure systemd has read any changes.
- - When set to C(yes), runs daemon-reload even if the module does not start or stop anything.
+ - When set to C(true), runs daemon-reload even if the module does not start or stop anything.
type: bool
default: no
aliases: [ daemon-reload ]
@@ -54,20 +57,16 @@ options:
default: no
aliases: [ daemon-reexec ]
version_added: "2.8"
- user:
- description:
- - (deprecated) run ``systemctl`` talking to the service manager of the calling user, rather than the service manager
- of the system.
- - This option is deprecated and will eventually be removed in 2.11. The ``scope`` option should be used instead.
- - The default value is C(false).
- type: bool
scope:
description:
- Run systemctl within a given service manager scope, either as the default system scope C(system),
the current user's scope C(user), or the scope of all users C(global).
- - "For systemd to work with 'user', the executing user must have its own instance of dbus started (systemd requirement).
- The user dbus process is normally started during normal login, but not during the run of Ansible tasks.
+ - "For systemd to work with 'user', the executing user must have its own instance of dbus started and accessible (systemd requirement)."
+ - "The user dbus process is normally started during normal login, but not during the run of Ansible tasks.
Otherwise you will probably get a 'Failed to connect to bus: no such file or directory' error."
+ - The user must have access to the dbus session; this is normally granted by setting the C(XDG_RUNTIME_DIR) environment variable, see the example below.
+
+ type: str
choices: [ system, user, global ]
default: system
version_added: "2.7"
@@ -79,9 +78,9 @@ options:
default: no
version_added: "2.3"
notes:
- - Since 2.4, one of the following options is required 'state', 'enabled', 'masked', 'daemon_reload', ('daemon_reexec' since 2.8),
- and all except 'daemon_reload' (and 'daemon_reexec' since 2.8) also require 'name'.
- - Before 2.4 you always required 'name'.
+ - Since 2.4, one of the following options is required: C(state), C(enabled), C(masked), C(daemon_reload), (C(daemon_reexec) since 2.8),
+ and all except C(daemon_reload) (and C(daemon_reexec) since 2.8) also require C(name).
+ - Before 2.4 you always required C(name).
- Globs are not supported in name, i.e ``postgres*.service``.
- Supports C(check_mode).
requirements:
@@ -89,7 +88,7 @@ requirements:
'''
EXAMPLES = '''
-- name: Make sure a service is running
+- name: Make sure a service unit is running
ansible.builtin.systemd:
state: started
name: httpd
@@ -107,7 +106,7 @@ EXAMPLES = '''
- name: Reload service httpd, in all cases
ansible.builtin.systemd:
- name: httpd
+ name: httpd.service
state: reloaded
- name: Enable service httpd and ensure it is not masked
@@ -116,7 +115,7 @@ EXAMPLES = '''
enabled: yes
masked: no
-- name: Enable a timer for dnf-automatic
+- name: Enable a timer unit for dnf-automatic
ansible.builtin.systemd:
name: dnf-automatic.timer
state: started
@@ -129,6 +128,14 @@ EXAMPLES = '''
- name: Just force systemd to re-execute itself (2.8 and above)
ansible.builtin.systemd:
daemon_reexec: yes
+
+- name: Run a user service when XDG_RUNTIME_DIR is not set on remote login
+ ansible.builtin.systemd:
+ name: myservice
+ state: started
+ scope: user
+ environment:
+ XDG_RUNTIME_DIR: "/run/user/{{ myuid }}"
'''
RETURN = '''
@@ -330,7 +337,6 @@ def main():
masked=dict(type='bool'),
daemon_reload=dict(type='bool', default=False, aliases=['daemon-reload']),
daemon_reexec=dict(type='bool', default=False, aliases=['daemon-reexec']),
- user=dict(type='bool'),
scope=dict(type='str', default='system', choices=['system', 'user', 'global']),
no_block=dict(type='bool', default=False),
),
@@ -341,7 +347,6 @@ def main():
enabled=('name', ),
masked=('name', ),
),
- mutually_exclusive=[['scope', 'user']],
)
unit = module.params['name']
@@ -356,14 +361,6 @@ def main():
os.environ['XDG_RUNTIME_DIR'] = '/run/user/%s' % os.geteuid()
''' Set CLI options depending on params '''
- if module.params['user'] is not None:
- # handle user deprecation, mutually exclusive with scope
- module.deprecate("The 'user' option is being replaced by 'scope'", version='2.11', collection_name='ansible.builtin')
- if module.params['user']:
- module.params['scope'] = 'user'
- else:
- module.params['scope'] = 'system'
-
# if scope is 'system' or None, we can ignore as there is no extra switch.
# The other choices match the corresponding switch
if module.params['scope'] != 'system':
@@ -500,7 +497,6 @@ def main():
elif rc == 1:
# if not a user or global user service and both init script and unit file exist stdout should have enabled/disabled, otherwise use rc entries
if module.params['scope'] == 'system' and \
- not module.params['user'] and \
is_initd and \
not out.strip().endswith('disabled') and \
sysv_is_enabled(unit):
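A condensed sketch of the environment fix-up that scope=user relies on (visible in the context above): systemctl --user needs XDG_RUNTIME_DIR to point at the calling user's runtime directory, so it is filled in from the effective UID when the login environment did not provide it.

    import os

    # Only set the variable when it is missing from the environment.
    if 'XDG_RUNTIME_DIR' not in os.environ:
        os.environ['XDG_RUNTIME_DIR'] = '/run/user/%s' % os.geteuid()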
diff --git a/lib/ansible/modules/sysvinit.py b/lib/ansible/modules/sysvinit.py
index 309bb054..c598cf6a 100644
--- a/lib/ansible/modules/sysvinit.py
+++ b/lib/ansible/modules/sysvinit.py
@@ -22,12 +22,14 @@ options:
required: true
description:
- Name of the service.
+ type: str
aliases: ['service']
state:
choices: [ 'started', 'stopped', 'restarted', 'reloaded' ]
description:
- C(started)/C(stopped) are idempotent actions that will not run commands unless necessary.
Not all init scripts support C(restarted) nor C(reloaded) natively, so these will both trigger a stop and start as needed.
+ type: str
enabled:
type: bool
description:
@@ -37,18 +39,23 @@ options:
description:
- If the service is being C(restarted) or C(reloaded) then sleep this many seconds between the stop and start command.
This helps to workaround badly behaving services.
+ type: int
pattern:
description:
- A substring to look for as would be found in the output of the I(ps) command as a stand-in for a status result.
- If the string is found, the service will be assumed to be running.
- "This option is mainly for use with init scripts that don't support the 'status' option."
+ type: str
runlevels:
description:
- The runlevels this script should be enabled/disabled from.
- Use this to override the defaults set by the package or init script itself.
+ type: list
+ elements: str
arguments:
description:
- Additional arguments provided on the command line that some init scripts accept.
+ type: str
aliases: [ 'args' ]
daemonize:
type: bool
@@ -124,7 +131,7 @@ def main():
sleep=dict(type='int', default=1),
pattern=dict(type='str'),
arguments=dict(type='str', aliases=['args']),
- runlevels=dict(type='list'),
+ runlevels=dict(type='list', elements='str'),
daemonize=dict(type='bool', default=False),
),
supports_check_mode=True,
diff --git a/lib/ansible/modules/unarchive.py b/lib/ansible/modules/unarchive.py
index d35a7786..4867f8d7 100644
--- a/lib/ansible/modules/unarchive.py
+++ b/lib/ansible/modules/unarchive.py
@@ -39,7 +39,7 @@ options:
required: true
copy:
description:
- - If true, the file is copied from local 'master' to the target machine, otherwise, the plugin will look for src archive at the target machine.
+ - If true, the file is copied from the local controller to the managed (remote) node; otherwise, the plugin will look for the src archive on the managed (remote) node.
- This option has been deprecated in favor of C(remote_src).
- This option is mutually exclusive with C(remote_src).
type: bool
@@ -58,8 +58,20 @@ options:
exclude:
description:
- List the directory and file entries that you would like to exclude from the unarchive action.
+ - Mutually exclusive with C(include).
type: list
+ default: []
+ elements: str
version_added: "2.1"
+ include:
+ description:
+ - List of directory and file entries that you would like to extract from the archive. Only
+ files listed here will be extracted.
+ - Mutually exclusive with C(exclude).
+ type: list
+ default: []
+ elements: str
+ version_added: "2.11"
keep_newer:
description:
- Do not replace existing files that are newer than files from the archive.
@@ -72,6 +84,7 @@ options:
- Each space-separated command-line option should be a new element of the array. See examples.
- Command-line options with multiple elements must use multiple lines in the array, one for each element.
type: list
+ elements: str
default: ""
version_added: "2.1"
remote_src:
@@ -97,8 +110,9 @@ todo:
- Re-implement zip support using native zipfile module.
notes:
- Requires C(zipinfo) and C(gtar)/C(unzip) command on target host.
- - Can handle I(.zip) files using C(unzip) as well as I(.tar), I(.tar.gz), I(.tar.bz2) and I(.tar.xz) files using C(gtar).
- - Does not handle I(.gz) files, I(.bz2) files or I(.xz) files that do not contain a I(.tar) archive.
+ - Requires C(zstd) command on target host to expand I(.tar.zst) files.
+ - Can handle I(.zip) files using C(unzip) as well as I(.tar), I(.tar.gz), I(.tar.bz2), I(.tar.xz), and I(.tar.zst) files using C(gtar).
+ - Does not handle I(.gz) files, I(.bz2) files, I(.xz), or I(.zst) files that do not contain a I(.tar) archive.
- Uses gtar's C(--diff) arg to calculate if changed or not. If this C(arg) is not
supported, it will always unpack the archive.
- Existing files/directories in the destination which are not in the archive
@@ -140,6 +154,66 @@ EXAMPLES = r'''
- s/^xxx/yyy/
'''
+RETURN = r'''
+dest:
+ description: Path to the destination directory.
+ returned: always
+ type: str
+ sample: /opt/software
+files:
+ description: List of all the files in the archive.
+ returned: When I(list_files) is True
+ type: list
+ sample: '["file1", "file2"]'
+gid:
+ description: Numerical ID of the group that owns the destination directory.
+ returned: always
+ type: int
+ sample: 1000
+group:
+ description: Name of the group that owns the destination directory.
+ returned: always
+ type: str
+ sample: "librarians"
+handler:
+ description: Archive software handler used to extract and decompress the archive.
+ returned: always
+ type: str
+ sample: "TgzArchive"
+mode:
+ description: String that represents the octal permissions of the destination directory.
+ returned: always
+ type: str
+ sample: "0755"
+owner:
+ description: Name of the user that owns the destination directory.
+ returned: always
+ type: str
+ sample: "paul"
+size:
+ description: The size of destination directory in bytes. Does not include the size of files or subdirectories contained within.
+ returned: always
+ type: int
+ sample: 36
+src:
+ description:
+ - The source archive's path.
+ - If I(src) was a remote web URL, or from the local ansible controller, this shows the temporary location where the download was stored.
+ returned: always
+ type: str
+ sample: "/home/paul/test.tar.gz"
+state:
+ description: State of the destination. Effectively always "directory".
+ returned: always
+ type: str
+ sample: "directory"
+uid:
+ description: Numerical ID of the user that owns the destination directory.
+ returned: always
+ type: int
+ sample: 1000
+'''
+
import binascii
import codecs
import datetime
@@ -155,6 +229,7 @@ import traceback
from zipfile import ZipFile, BadZipfile
from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.urls import fetch_file
from ansible.module_utils._text import to_bytes, to_native, to_text
@@ -203,8 +278,9 @@ class ZipArchive(object):
self.module = module
self.excludes = module.params['exclude']
self.includes = []
- self.cmd_path = self.module.get_bin_path('unzip')
- self.zipinfocmd_path = self.module.get_bin_path('zipinfo')
+ self.include_files = self.module.params['include']
+ self.cmd_path = None
+ self.zipinfo_cmd_path = None
self._files_in_archive = []
self._infodict = dict()
@@ -276,14 +352,19 @@ class ZipArchive(object):
else:
try:
for member in archive.namelist():
- exclude_flag = False
- if self.excludes:
- for exclude in self.excludes:
- if fnmatch.fnmatch(member, exclude):
- exclude_flag = True
- break
- if not exclude_flag:
- self._files_in_archive.append(to_native(member))
+ if self.include_files:
+ for include in self.include_files:
+ if fnmatch.fnmatch(member, include):
+ self._files_in_archive.append(to_native(member))
+ else:
+ exclude_flag = False
+ if self.excludes:
+ for exclude in self.excludes:
+ if fnmatch.fnmatch(member, exclude):
+ exclude_flag = True
+ break
+ if not exclude_flag:
+ self._files_in_archive.append(to_native(member))
except Exception:
archive.close()
raise UnarchiveError('Unable to list files in the archive')
@@ -293,9 +374,12 @@ class ZipArchive(object):
def is_unarchived(self):
# BSD unzip doesn't support zipinfo listings with timestamp.
- cmd = [self.zipinfocmd_path, '-T', '-s', self.src]
+ cmd = [self.zipinfo_cmd_path, '-T', '-s', self.src]
+
if self.excludes:
cmd.extend(['-x', ] + self.excludes)
+ if self.include_files:
+ cmd.extend(self.include_files)
rc, out, err = self.module.run_command(cmd)
old_out = out
@@ -331,8 +415,8 @@ class ZipArchive(object):
tpw = pwd.getpwnam(self.file_args['owner'])
except KeyError:
try:
- tpw = pwd.getpwuid(self.file_args['owner'])
- except (TypeError, KeyError):
+ tpw = pwd.getpwuid(int(self.file_args['owner']))
+ except (TypeError, KeyError, ValueError):
tpw = pwd.getpwuid(run_uid)
fut_owner = tpw.pw_name
fut_uid = tpw.pw_uid
@@ -350,7 +434,9 @@ class ZipArchive(object):
tgr = grp.getgrnam(self.file_args['group'])
except (ValueError, KeyError):
try:
- tgr = grp.getgrgid(self.file_args['group'])
+ # no need to check isdigit() explicitly here, if we fail to
+ # parse, the ValueError will be caught.
+ tgr = grp.getgrgid(int(self.file_args['group']))
except (KeyError, ValueError, OverflowError):
tgr = grp.getgrgid(run_gid)
fut_group = tgr.gr_name
@@ -602,13 +688,27 @@ class ZipArchive(object):
# cmd.extend(map(shell_escape, self.includes))
if self.excludes:
cmd.extend(['-x'] + self.excludes)
+ if self.include_files:
+ cmd.extend(self.include_files)
cmd.extend(['-d', self.b_dest])
rc, out, err = self.module.run_command(cmd)
return dict(cmd=cmd, rc=rc, out=out, err=err)
def can_handle_archive(self):
- if not self.cmd_path:
- return False, 'Command "unzip" not found.'
+ binaries = (
+ ('unzip', 'cmd_path'),
+ ('zipinfo', 'zipinfo_cmd_path'),
+ )
+ missing = []
+ for b in binaries:
+ try:
+ setattr(self, b[1], get_bin_path(b[0]))
+ except ValueError:
+ missing.append(b[0])
+
+ if missing:
+ return False, "Unable to find required '{missing}' binary in the path.".format(missing="' or '".join(missing))
+
cmd = [self.cmd_path, '-l', self.src]
rc, out, err = self.module.run_command(cmd)
if rc == 0:
@@ -627,19 +727,12 @@ class TgzArchive(object):
if self.module.check_mode:
self.module.exit_json(skipped=True, msg="remote module (%s) does not support check mode when using gtar" % self.module._name)
self.excludes = [path.rstrip('/') for path in self.module.params['exclude']]
- # Prefer gtar (GNU tar) as it supports the compression options -z, -j and -J
- self.cmd_path = self.module.get_bin_path('gtar', None)
- if not self.cmd_path:
- # Fallback to tar
- self.cmd_path = self.module.get_bin_path('tar')
+ self.include_files = self.module.params['include']
+ self.cmd_path = None
+ self.tar_type = None
self.zipflag = '-z'
self._files_in_archive = []
- if self.cmd_path:
- self.tar_type = self._get_tar_type()
- else:
- self.tar_type = None
-
def _get_tar_type(self):
cmd = [self.cmd_path, '--version']
(rc, out, err) = self.module.run_command(cmd)
@@ -663,8 +756,10 @@ class TgzArchive(object):
if self.excludes:
cmd.extend(['--exclude=' + f for f in self.excludes])
cmd.extend(['-f', self.src])
- rc, out, err = self.module.run_command(cmd, cwd=self.b_dest, environ_update=dict(LANG='C', LC_ALL='C', LC_MESSAGES='C'))
+ if self.include_files:
+ cmd.extend(self.include_files)
+ rc, out, err = self.module.run_command(cmd, cwd=self.b_dest, environ_update=dict(LANG='C', LC_ALL='C', LC_MESSAGES='C'))
if rc != 0:
raise UnarchiveError('Unable to list files in the archive')
@@ -706,6 +801,8 @@ class TgzArchive(object):
if self.excludes:
cmd.extend(['--exclude=' + f for f in self.excludes])
cmd.extend(['-f', self.src])
+ if self.include_files:
+ cmd.extend(self.include_files)
rc, out, err = self.module.run_command(cmd, cwd=self.b_dest, environ_update=dict(LANG='C', LC_ALL='C', LC_MESSAGES='C'))
# Check whether the differences are in something that we're
@@ -757,12 +854,23 @@ class TgzArchive(object):
if self.excludes:
cmd.extend(['--exclude=' + f for f in self.excludes])
cmd.extend(['-f', self.src])
+ if self.include_files:
+ cmd.extend(self.include_files)
rc, out, err = self.module.run_command(cmd, cwd=self.b_dest, environ_update=dict(LANG='C', LC_ALL='C', LC_MESSAGES='C'))
return dict(cmd=cmd, rc=rc, out=out, err=err)
def can_handle_archive(self):
- if not self.cmd_path:
- return False, 'Commands "gtar" and "tar" not found.'
+ # Prefer gtar (GNU tar) as it supports the compression options -z, -j and -J
+ try:
+ self.cmd_path = get_bin_path('gtar')
+ except ValueError:
+ # Fallback to tar
+ try:
+ self.cmd_path = get_bin_path('tar')
+ except ValueError:
+ return False, "Unable to find required 'gtar' or 'tar' binary in the path"
+
+ self.tar_type = self._get_tar_type()
if self.tar_type != 'gnu':
return False, 'Command "%s" detected as tar type %s. GNU tar required.' % (self.cmd_path, self.tar_type)
@@ -799,9 +907,22 @@ class TarXzArchive(TgzArchive):
self.zipflag = '-J'
+# Class to handle zstd compressed tar files
+class TarZstdArchive(TgzArchive):
+ def __init__(self, src, b_dest, file_args, module):
+ super(TarZstdArchive, self).__init__(src, b_dest, file_args, module)
+ # GNU Tar supports the --use-compress-program option to
+ # specify which executable to use for
+ # compression/decompression.
+ #
+ # Note: some flavors of BSD tar support --zstd (e.g., FreeBSD
+ # 12.2), but the TgzArchive class only supports GNU Tar.
+ self.zipflag = '--use-compress-program=zstd'
+
+
# try handlers in order and return the one that works or bail if none work
def pick_handler(src, dest, file_args, module):
- handlers = [ZipArchive, TgzArchive, TarArchive, TarBzipArchive, TarXzArchive]
+ handlers = [ZipArchive, TgzArchive, TarArchive, TarBzipArchive, TarXzArchive, TarZstdArchive]
reasons = set()
for handler in handlers:
obj = handler(src, dest, file_args, module)
@@ -823,13 +944,15 @@ def main():
creates=dict(type='path'),
list_files=dict(type='bool', default=False),
keep_newer=dict(type='bool', default=False),
- exclude=dict(type='list', default=[]),
- extra_opts=dict(type='list', default=[]),
+ exclude=dict(type='list', elements='str', default=[]),
+ include=dict(type='list', elements='str', default=[]),
+ extra_opts=dict(type='list', elements='str', default=[]),
validate_certs=dict(type='bool', default=True),
),
add_file_common_args=True,
# check-mode only works for zip files, we cover that later
supports_check_mode=True,
+ mutually_exclusive=[('include', 'exclude')],
)
src = module.params['src']
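A standalone sketch of the include/exclude member filtering the unarchive handlers now share: when I(include) is given it wins, otherwise members matching any exclude pattern are dropped. The function name and sample members are illustrative.

    import fnmatch

    def filter_members(members, include=None, exclude=None):
        selected = []
        for member in members:
            if include:
                # Keep only members that match at least one include pattern.
                if any(fnmatch.fnmatch(member, pat) for pat in include):
                    selected.append(member)
            elif not any(fnmatch.fnmatch(member, pat) for pat in (exclude or [])):
                selected.append(member)
        return selected

    print(filter_members(['a.txt', 'docs/b.md'], include=['*.txt']))   # ['a.txt']
    print(filter_members(['a.txt', 'docs/b.md'], exclude=['docs/*']))  # ['a.txt']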
diff --git a/lib/ansible/modules/uri.py b/lib/ansible/modules/uri.py
index f13ddb47..c21c45f2 100644
--- a/lib/ansible/modules/uri.py
+++ b/lib/ansible/modules/uri.py
@@ -107,6 +107,7 @@ options:
description:
- A list of valid, numeric, HTTP status codes that signifies success of the request.
type: list
+ elements: int
default: [ 200 ]
timeout:
description:
@@ -140,6 +141,11 @@ options:
- If I(client_cert) contains both the certificate and key, this option is not required.
type: path
version_added: '2.4'
+ ca_path:
+ description:
+ - PEM formatted file that contains a CA certificate to be used for validation
+ type: path
+ version_added: '2.11'
src:
description:
- Path to file to be submitted to the remote server.
@@ -148,8 +154,8 @@ options:
version_added: '2.7'
remote_src:
description:
- - If C(no), the module will search for src on originating/master machine.
- - If C(yes) the module will use the C(src) path on the remote/target machine.
+ - If C(no), the module will search for the C(src) on the controller node.
+ - If C(yes), the module will search for the C(src) on the managed (remote) node.
type: bool
default: no
version_added: '2.7'
@@ -168,12 +174,24 @@ options:
unix_socket:
description:
- Path to Unix domain socket to use for connection
+ type: path
version_added: '2.8'
http_agent:
description:
- Header to identify as, generally appears in web server logs.
type: str
default: ansible-httpget
+ use_gssapi:
+ description:
+ - Use GSSAPI to perform the authentication, typically for Kerberos or Kerberos through Negotiate
+ authentication.
+ - Requires the Python library L(gssapi,https://github.com/pythongssapi/python-gssapi) to be installed.
+ - Credentials for GSSAPI can be specified with I(url_username)/I(url_password) or with the GSSAPI env var
+ C(KRB5CCNAME) that specifies a custom Kerberos credential cache.
+ - NTLM authentication is C(not) supported even if the GSSAPI mech for NTLM has been installed.
+ type: bool
+ default: no
+ version_added: '2.11'
notes:
- The dependency on httplib2 was removed in Ansible 2.1.
- The module returns all the HTTP headers in lower-case.
@@ -535,13 +553,12 @@ def form_urlencoded(body):
return body
-def uri(module, url, dest, body, body_format, method, headers, socket_timeout):
+def uri(module, url, dest, body, body_format, method, headers, socket_timeout, ca_path):
# is dest is set and is a directory, let's check if we get redirected and
# set the filename from that url
redirected = False
redir_info = {}
r = {}
-
src = module.params['src']
if src:
try:
@@ -581,6 +598,7 @@ def uri(module, url, dest, body, body_format, method, headers, socket_timeout):
resp, info = fetch_url(module, url, data=data, headers=headers,
method=method, timeout=socket_timeout, unix_socket=module.params['unix_socket'],
+ ca_path=ca_path,
**kwargs)
try:
@@ -618,11 +636,12 @@ def main():
follow_redirects=dict(type='str', default='safe', choices=['all', 'no', 'none', 'safe', 'urllib2', 'yes']),
creates=dict(type='path'),
removes=dict(type='path'),
- status_code=dict(type='list', default=[200]),
+ status_code=dict(type='list', elements='int', default=[200]),
timeout=dict(type='int', default=30),
headers=dict(type='dict', default={}),
unix_socket=dict(type='path'),
remote_src=dict(type='bool', default=False),
+ ca_path=dict(type='path', default=None),
)
module = AnsibleModule(
@@ -645,7 +664,7 @@ def main():
removes = module.params['removes']
status_code = [int(x) for x in list(module.params['status_code'])]
socket_timeout = module.params['timeout']
-
+ ca_path = module.params['ca_path']
dict_headers = module.params['headers']
if not re.match('^[A-Z]+$', method):
@@ -689,7 +708,7 @@ def main():
# Make the request
start = datetime.datetime.utcnow()
resp, content, dest = uri(module, url, dest, body, body_format, method,
- dict_headers, socket_timeout)
+ dict_headers, socket_timeout, ca_path)
resp['elapsed'] = (datetime.datetime.utcnow() - start).seconds
resp['status'] = int(resp['status'])
resp['changed'] = False
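Illustrative only, not the module's implementation: the new I(ca_path) option amounts to pointing certificate validation at a specific PEM bundle, roughly as the standard library allows.

    import ssl

    def make_context(ca_path=None):
        # Start from sane defaults, then also trust the supplied CA bundle.
        ctx = ssl.create_default_context()
        if ca_path:
            ctx.load_verify_locations(cafile=ca_path)
        return ctx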
diff --git a/lib/ansible/modules/user.py b/lib/ansible/modules/user.py
index 583dfe22..71aa448b 100644
--- a/lib/ansible/modules/user.py
+++ b/lib/ansible/modules/user.py
@@ -57,6 +57,7 @@ options:
the user is removed from all groups except the primary group.
- Before Ansible 2.3, the only input format allowed was a comma separated string.
type: list
+ elements: str
append:
description:
- If C(yes), add the user to the groups specified in C(groups).
@@ -69,8 +70,8 @@ options:
- Optionally set the user's shell.
- On macOS, before Ansible 2.5, the default shell for non-system users was C(/usr/bin/false).
Since Ansible 2.5, the default shell for non-system users on macOS is C(/bin/bash).
- - On other operating systems, the default shell is determined by the underlying tool being
- used. See Notes for details.
+ - See notes for details on how the default shell is determined by the underlying tool
+ on other operating systems.
type: str
home:
description:
@@ -88,7 +89,7 @@ options:
- On macOS systems, this value has to be cleartext. Beware of security issues.
- To create a disabled account on Linux systems, set this to C('!') or C('*').
- To create a disabled account on OpenBSD, set this to C('*************').
- - See U(https://docs.ansible.com/ansible/faq.html#how-do-i-generate-encrypted-passwords-for-the-user-module)
+ - See L(FAQ entry,https://docs.ansible.com/ansible/latest/reference_appendices/faq.html#how-do-i-generate-encrypted-passwords-for-the-user-module)
for details on various ways to generate these password values.
type: str
state:
@@ -237,6 +238,19 @@ options:
- Currently supported on Illumos/Solaris.
type: str
version_added: "2.8"
+ password_expire_max:
+ description:
+ - Maximum number of days between password changes.
+ - Supported on Linux only.
+ type: int
+ version_added: "2.11"
+ password_expire_min:
+ description:
+ - Minimum number of days between password changes.
+ - Supported on Linux only.
+ type: int
+ version_added: "2.11"
+
notes:
- There are specific requirements per platform on user management utilities. However
they generally come pre-installed with the system and Ansible will require they
@@ -298,6 +312,16 @@ EXAMPLES = r'''
ansible.builtin.user:
name: james18
expires: -1
+
+- name: Set maximum expiration date for password
+ user:
+ name: ram19
+ password_expire_max: 10
+
+- name: Set minimum expiration date for password
+ user:
+ name: pushkar15
+ password_expire_min: 5
'''
RETURN = r'''
@@ -399,6 +423,16 @@ uid:
returned: When I(uid) is passed to the module
type: int
sample: 1044
+password_expire_max:
+ description: Maximum number of days during which a password is valid.
+ returned: When user exists
+ type: int
+ sample: 20
+password_expire_min:
+ description: Minimum number of days between password changes.
+ returned: When user exists
+ type: int
+ sample: 20
'''
@@ -493,6 +527,8 @@ class User(object):
self.profile = module.params['profile']
self.authorization = module.params['authorization']
self.role = module.params['role']
+ self.password_expire_max = module.params['password_expire_max']
+ self.password_expire_min = module.params['password_expire_min']
if module.params['groups'] is not None:
self.groups = ','.join(module.params['groups'])
@@ -988,6 +1024,30 @@ class User(object):
info[1] = self.user_password()[0]
return info
+ def set_password_expire_max(self):
+ command_name = 'chage'
+ cmd = [self.module.get_bin_path(command_name, True)]
+ cmd.append('-M')
+ cmd.append(str(self.password_expire_max))
+ cmd.append(self.name)
+ if self.password_expire_max == spwd.getspnam(self.name).sp_max:
+ self.module.exit_json(changed=False)
+ else:
+ self.execute_command(cmd)
+ self.module.exit_json(changed=True)
+
+ def set_password_expire_min(self):
+ command_name = 'chage'
+ cmd = [self.module.get_bin_path(command_name, True)]
+ cmd.append('-m')
+ cmd.append(str(self.password_expire_min))
+ cmd.append(self.name)
+ if self.password_expire_min == spwd.getspnam(self.name).sp_min:
+ self.module.exit_json(changed=False)
+ else:
+ self.execute_command(cmd)
+ self.module.exit_json(changed=True)
+
def user_password(self):
passwd = ''
expires = ''
@@ -2950,12 +3010,14 @@ def main():
uid=dict(type='int'),
non_unique=dict(type='bool', default=False),
group=dict(type='str'),
- groups=dict(type='list'),
+ groups=dict(type='list', elements='str'),
comment=dict(type='str'),
home=dict(type='path'),
shell=dict(type='str'),
password=dict(type='str', no_log=True),
login_class=dict(type='str'),
+ password_expire_max=dict(type='int', no_log=False),
+ password_expire_min=dict(type='int', no_log=False),
# following options are specific to macOS
hidden=dict(type='bool'),
# following options are specific to selinux
@@ -3095,6 +3157,16 @@ def main():
result['ssh_key_file'] = user.get_ssh_key_path()
result['ssh_public_key'] = user.get_ssh_public_key()
+ # deal with password expire max
+ if user.password_expire_max:
+ if user.user_exists():
+ (rc, out, err) = user.set_password_expire_max()
+
+ # deal with password expire min
+ if user.password_expire_min:
+ if user.user_exists():
+ (rc, out, err) = user.set_password_expire_min()
+
module.exit_json(**result)
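A minimal sketch, separate from the module code, of the chage calls behind I(password_expire_max) and I(password_expire_min); the binary path is assumed, and the explicit str() casts keep the argv list all-strings.

    import subprocess

    def set_password_expiry(name, max_days=None, min_days=None, chage='/usr/bin/chage'):
        # chage -M sets the maximum, -m the minimum number of days between changes.
        if max_days is not None:
            subprocess.check_call([chage, '-M', str(max_days), name])
        if min_days is not None:
            subprocess.check_call([chage, '-m', str(min_days), name])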
diff --git a/lib/ansible/modules/validate_argument_spec.py b/lib/ansible/modules/validate_argument_spec.py
new file mode 100644
index 00000000..76942aa6
--- /dev/null
+++ b/lib/ansible/modules/validate_argument_spec.py
@@ -0,0 +1,96 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2021 Red Hat
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: validate_argument_spec
+short_description: Validate role argument specs.
+description:
+ - This module validates role arguments with a defined argument specification.
+version_added: "2.11"
+options:
+ argument_spec:
+ description:
+ - A dictionary like AnsibleModule argument_spec
+ required: true
+ provided_arguments:
+ description:
+ - A dictionary of the arguments that will be validated according to argument_spec
+author:
+ - Ansible Core Team
+'''
+
+EXAMPLES = r'''
+- name: verify vars needed for this task file are present when included
+ validate_argument_spec:
+ argument_spec: '{{required_data}}'
+ vars:
+ required_data:
+ # unlike spec file, just put the options in directly
+ stuff:
+ description: stuff
+ type: str
+ choices: ['who', 'knows', 'what']
+ default: what
+ but:
+ description: i guess we need one
+ type: str
+ required: true
+
+
+- name: verify vars needed for this task file are present when included, with spec from a spec file
+ validate_argument_spec:
+ argument_spec: "{{lookup('file', 'myargspec.yml')['specname']['options']}}"
+
+
+- name: verify vars needed for next include and not from inside it, also with params I'll only define there
+ block:
+ - validate_argument_spec:
+ argument_spec: "{{ lookup('file', 'nakedoptions.yml') }}"
+ provided_arguments:
+ but: "that I can define on the include itself, like in its `vars:` keyword"
+
+ - name: the include itself
+ vars:
+ stuff: knows
+ but: nobuts!
+'''
+
+RETURN = r'''
+argument_errors:
+ description: A list of arg validation errors.
+ returned: failure
+ type: list
+ elements: str
+ sample:
+ - "error message 1"
+ - "error message 2"
+
+argument_spec_data:
+ description: A dict of the data from the 'argument_spec' arg.
+ returned: failure
+ type: dict
+ sample:
+ some_arg:
+ type: "str"
+ some_other_arg:
+ type: "int"
+ required: true
+
+validate_args_context:
+ description: A dict of info about where validate_argument_spec was used.
+ type: dict
+ returned: always
+ sample:
+ name: my_role
+ type: role
+ path: /home/user/roles/my_role/
+ argument_spec_name: main
+'''
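A hedged sketch of the data shapes this module consumes: I(argument_spec) maps option names to specs in the same format as an AnsibleModule argument_spec, and the provided arguments are checked against it. The names and the toy required/choices check below are illustrative, not the validator the module actually uses.

    argument_spec = {
        'stuff': {'type': 'str', 'choices': ['who', 'knows', 'what'], 'default': 'what'},
        'but': {'type': 'str', 'required': True},
    }
    provided = {'stuff': 'maybe'}

    errors = []
    for name, spec in argument_spec.items():
        if spec.get('required') and name not in provided:
            errors.append('missing required arguments: %s' % name)
        if name in provided and 'choices' in spec and provided[name] not in spec['choices']:
            errors.append('value of %s must be one of: %s' % (name, ', '.join(spec['choices'])))
    print(errors)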
diff --git a/lib/ansible/modules/wait_for.py b/lib/ansible/modules/wait_for.py
index b855030c..428ff9bd 100644
--- a/lib/ansible/modules/wait_for.py
+++ b/lib/ansible/modules/wait_for.py
@@ -56,6 +56,7 @@ options:
description:
- The list of TCP connection states which are counted as active connections.
type: list
+ elements: str
default: [ ESTABLISHED, FIN_WAIT1, FIN_WAIT2, SYN_RECV, SYN_SENT, TIME_WAIT ]
version_added: "2.3"
state:
@@ -83,6 +84,7 @@ options:
description:
- List of hosts or IPs to ignore when looking for active TCP connections for C(drained) state.
type: list
+ elements: str
version_added: "1.8"
sleep:
description:
@@ -367,28 +369,33 @@ class LinuxTCPConnectionInfo(TCPConnectionInfo):
for family in self.source_file.keys():
if not os.path.isfile(self.source_file[family]):
continue
- f = open(self.source_file[family])
- for tcp_connection in f.readlines():
- tcp_connection = tcp_connection.strip().split()
- if tcp_connection[self.local_address_field] == 'local_address':
- continue
- if (tcp_connection[self.connection_state_field] not in
- [get_connection_state_id(_connection_state) for _connection_state in self.module.params['active_connection_states']]):
- continue
- (local_ip, local_port) = tcp_connection[self.local_address_field].split(':')
- if self.port != local_port:
- continue
- (remote_ip, remote_port) = tcp_connection[self.remote_address_field].split(':')
- if (family, remote_ip) in self.exclude_ips:
- continue
- if any((
- (family, local_ip) in self.ips,
- (family, self.match_all_ips[family]) in self.ips,
- local_ip.startswith(self.ipv4_mapped_ipv6_address['prefix']) and
- (family, self.ipv4_mapped_ipv6_address['match_all']) in self.ips,
- )):
- active_connections += 1
- f.close()
+ f = None
+ try:
+ f = open(self.source_file[family])
+ for tcp_connection in f.readlines():
+ tcp_connection = tcp_connection.strip().split()
+ if tcp_connection[self.local_address_field] == 'local_address':
+ continue
+ if (tcp_connection[self.connection_state_field] not in
+ [get_connection_state_id(_connection_state) for _connection_state in self.module.params['active_connection_states']]):
+ continue
+ (local_ip, local_port) = tcp_connection[self.local_address_field].split(':')
+ if self.port != local_port:
+ continue
+ (remote_ip, remote_port) = tcp_connection[self.remote_address_field].split(':')
+ if (family, remote_ip) in self.exclude_ips:
+ continue
+ if any((
+ (family, local_ip) in self.ips,
+ (family, self.match_all_ips[family]) in self.ips,
+ local_ip.startswith(self.ipv4_mapped_ipv6_address['prefix']) and
+ (family, self.ipv4_mapped_ipv6_address['match_all']) in self.ips,
+ )):
+ active_connections += 1
+ except IOError:
+ pass
+ finally:
+ if f is not None:
+ f.close()
+
return active_connections
@@ -466,11 +473,11 @@ def main():
connect_timeout=dict(type='int', default=5),
delay=dict(type='int', default=0),
port=dict(type='int'),
- active_connection_states=dict(type='list', default=['ESTABLISHED', 'FIN_WAIT1', 'FIN_WAIT2', 'SYN_RECV', 'SYN_SENT', 'TIME_WAIT']),
+ active_connection_states=dict(type='list', elements='str', default=['ESTABLISHED', 'FIN_WAIT1', 'FIN_WAIT2', 'SYN_RECV', 'SYN_SENT', 'TIME_WAIT']),
path=dict(type='path'),
search_regex=dict(type='str'),
state=dict(type='str', default='started', choices=['absent', 'drained', 'present', 'started', 'stopped']),
- exclude_hosts=dict(type='list'),
+ exclude_hosts=dict(type='list', elements='str'),
sleep=dict(type='int', default=1),
msg=dict(type='str'),
),
@@ -649,11 +656,9 @@ def main():
end = start + datetime.timedelta(seconds=timeout)
tcpconns = TCPConnectionInfo(module)
while datetime.datetime.utcnow() < end:
- try:
- if tcpconns.get_active_connections_count() == 0:
- break
- except IOError:
- pass
+ if tcpconns.get_active_connections_count() == 0:
+ break
+
# Conditions not yet met, wait and try again
time.sleep(module.params['sleep'])
else:
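The try/finally added above guards the /proc read; as a side note, a context manager gives the same protection without having to track the file handle by hand. A small sketch under that assumption:

    def count_lines(path):
        # 'with' closes the handle even on error, and open() failures fall
        # through to the same IOError handling.
        try:
            with open(path) as f:
                return sum(1 for _ in f)
        except IOError:
            return 0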
diff --git a/lib/ansible/modules/yum.py b/lib/ansible/modules/yum.py
index d63fd52a..d417394a 100644
--- a/lib/ansible/modules/yum.py
+++ b/lib/ansible/modules/yum.py
@@ -28,10 +28,12 @@ options:
- By default, this module will select the backend based on the C(ansible_pkg_mgr) fact.
default: "auto"
choices: [ auto, yum, yum4, dnf ]
+ type: str
version_added: "2.7"
name:
description:
- A package name or package specifier with version, like C(name-1.0).
+ - Comparison operators for package versions are valid here: C(>), C(<), C(>=), C(<=). For example, C(name>=1.0).
- If a previous version is specified, the task also needs to turn C(allow_downgrade) on.
See the C(allow_downgrade) documentation for caveats with downgrading packages.
- When using state=latest, this can be C('*') which means run C(yum -y update).
@@ -43,12 +45,15 @@ options:
exclude:
description:
- Package name(s) to exclude when state=present, or latest
+ type: list
+ elements: str
version_added: "2.0"
list:
description:
- "Package name to run the equivalent of yum list --show-duplicates <package> against. In addition to listing packages,
you can also list the following: C(installed), C(updates), C(available) and C(repos)."
- This parameter is mutually exclusive with C(name).
+ type: str
state:
description:
- Whether to install (C(present) or C(installed), C(latest)), or remove (C(absent) or C(removed)) a package.
@@ -57,6 +62,7 @@ options:
- C(absent) and C(removed) will remove the specified package.
- Default is C(None), however in effect the default action is C(present) unless the C(autoremove) option is
enabled for this module, then C(absent) is inferred.
+ type: str
choices: [ absent, installed, latest, present, removed ]
enablerepo:
description:
@@ -65,6 +71,8 @@ options:
When specifying multiple repos, separate them with a C(",").
- As of Ansible 2.7, this can alternatively be a list instead of C(",")
separated string
+ type: list
+ elements: str
version_added: "0.9"
disablerepo:
description:
@@ -73,10 +81,13 @@ options:
When specifying multiple repos, separate them with a C(",").
- As of Ansible 2.7, this can alternatively be a list instead of C(",")
separated string
+ type: list
+ elements: str
version_added: "0.9"
conf_file:
description:
- The remote yum configuration file to use for the transaction.
+ type: str
version_added: "0.6"
disable_gpg_check:
description:
@@ -121,6 +132,7 @@ options:
- Specifies an alternative installroot, relative to which all packages
will be installed.
default: "/"
+ type: str
version_added: "2.3"
security:
description:
@@ -132,6 +144,7 @@ options:
description:
- If set to C(yes), and C(state=latest) then only installs updates that have been marked bugfix related.
default: "no"
+ type: bool
version_added: "2.6"
allow_downgrade:
description:
@@ -150,16 +163,21 @@ options:
description:
- I(Plugin) name to enable for the install/update operation.
The enabled plugin will not persist beyond the transaction.
+ type: list
+ elements: str
version_added: "2.5"
disable_plugin:
description:
- I(Plugin) name to disable for the install/update operation.
The disabled plugins will not persist beyond the transaction.
+ type: list
+ elements: str
version_added: "2.5"
releasever:
description:
- Specifies an alternative release from which all packages will be
installed.
+ type: str
version_added: "2.7"
autoremove:
description:
@@ -176,6 +194,7 @@ options:
- If set to C(all), disables all excludes.
- If set to C(main), disable excludes defined in [main] in yum.conf.
- If set to C(repoid), disable excludes defined for given repo id.
+ type: str
version_added: "2.7"
download_only:
description:
@@ -259,6 +278,11 @@ EXAMPLES = '''
name: httpd
state: latest
+- name: Install Apache >= 2.4
+ yum:
+ name: httpd>=2.4
+ state: present
+
- name: Install a list of packages (suitable replacement for 2.11 loop deprecation warning)
yum:
name:
@@ -346,6 +370,7 @@ EXAMPLES = '''
'''
from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.respawn import has_respawned, respawn_module
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.urls import fetch_url
from ansible.module_utils.yumdnf import YumDnf, yumdnf_argument_spec
@@ -353,6 +378,7 @@ from ansible.module_utils.yumdnf import YumDnf, yumdnf_argument_spec
import errno
import os
import re
+import sys
import tempfile
try:
@@ -1574,6 +1600,10 @@ class YumModule(YumDnf):
actually execute the module code backend
"""
+ if (not HAS_RPM_PYTHON or not HAS_YUM_PYTHON) and sys.executable != '/usr/bin/python' and not has_respawned():
+ respawn_module('/usr/bin/python')
+ # end of the line for this process; we'll exit here once the respawned module has completed
+
error_msgs = []
if not HAS_RPM_PYTHON:
error_msgs.append('The Python 2 bindings for rpm are needed for this module. If you require Python 3 support use the `dnf` Ansible module instead.')
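Condensed sketch of the respawn guard added above: when the yum/rpm Python bindings are missing under the current interpreter, the module re-execs itself once under the system Python instead of failing straight away. has_respawned() and respawn_module() come from the new ansible.module_utils.common.respawn helper.

    import sys
    from ansible.module_utils.common.respawn import has_respawned, respawn_module

    def ensure_system_python(have_bindings):
        if not have_bindings and sys.executable != '/usr/bin/python' and not has_respawned():
            # Does not return; the respawned copy finishes the work.
            respawn_module('/usr/bin/python')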
diff --git a/lib/ansible/modules/yum_repository.py b/lib/ansible/modules/yum_repository.py
index 71e67576..7b5ac23d 100644
--- a/lib/ansible/modules/yum_repository.py
+++ b/lib/ansible/modules/yum_repository.py
@@ -34,6 +34,7 @@ options:
throttling will be disabled. If I(throttle) is expressed as a data rate
(bytes/sec) then this option is ignored. Default is C(0) (no bandwidth
throttling).
+ type: str
default: 0
baseurl:
description:
@@ -42,10 +43,13 @@ options:
- It can also be a list of multiple URLs.
- This, the I(metalink) or I(mirrorlist) parameters are required if I(state) is set to
C(present).
+ type: list
+ elements: str
cost:
description:
- Relative cost of accessing this repository. Useful for weighing one
repo's packages as greater/less than any other.
+ type: str
default: 1000
deltarpm_metadata_percentage:
description:
@@ -54,34 +58,39 @@ options:
can give values over C(100), so C(200) means that the metadata is
required to be half the size of the packages. Use C(0) to turn off
this check, and always download metadata.
+ type: str
default: 100
deltarpm_percentage:
description:
- When the relative size of delta vs pkg is larger than this, delta is
not used. Use C(0) to turn off delta rpm processing. Local repositories
(with file:// I(baseurl)) have delta rpms turned off by default.
+ type: str
default: 75
description:
description:
- A human readable string describing the repository. This option corresponds to the "name" property in the repo file.
- This parameter is only required if I(state) is set to C(present).
+ type: str
enabled:
description:
- This tells yum whether or not to use this repository.
+ - Yum default value is C(true).
type: bool
- default: 'yes'
enablegroups:
description:
- Determines whether yum will allow the use of package groups for this
repository.
+ - Yum default value is C(true).
type: bool
- default: 'yes'
exclude:
description:
- List of packages to exclude from updates or installs. This should be a
space separated list. Shell globs using wildcards (eg. C(*) and C(?))
are allowed.
- The list can also be a regular YAML array.
+ type: list
+ elements: str
failovermethod:
choices: [roundrobin, priority]
default: roundrobin
@@ -91,13 +100,16 @@ options:
contacting the host.
- C(priority) starts from the first I(baseurl) listed and reads through
them sequentially.
+ type: str
file:
description:
- File name without the C(.repo) extension to save the repo in. Defaults
to the value of I(name).
+ type: str
gpgcakey:
description:
- A URL pointing to the ASCII-armored CA key file for the repository.
+ type: str
gpgcheck:
description:
- Tells yum whether or not it should perform a GPG signature check on
@@ -109,6 +121,14 @@ options:
description:
- A URL pointing to the ASCII-armored GPG key file for the repository.
- It can also be a list of multiple URLs.
+ type: list
+ elements: str
+ module_hotfixes:
+ description:
+ - Disable module RPM filtering and make all RPMs from the repository
+ available. The default is C(None).
+ version_added: '2.11'
+ type: bool
http_caching:
description:
- Determines how upstream HTTP caches are instructed to handle any HTTP
@@ -118,6 +138,7 @@ options:
not repository metadata downloads).
- C(none) means that no HTTP downloads should be cached.
choices: [all, packages, none]
+ type: str
default: all
include:
description:
@@ -125,6 +146,7 @@ options:
supported. Configuration file will be inserted at the position of the
I(include=) line. Included files may contain further include lines.
Yum will abort with an error if an inclusion loop is detected.
+ type: str
includepkgs:
description:
- List of packages you want to only use from a repository. This should be
@@ -132,12 +154,15 @@ options:
are allowed. Substitution variables (e.g. C($releasever)) are honored
here.
- The list can also be a regular YAML array.
+ type: list
+ elements: str
ip_resolve:
description:
- Determines how yum resolves host names.
- C(4) or C(IPv4) - resolve to IPv4 addresses only.
- C(6) or C(IPv6) - resolve to IPv6 addresses only.
choices: [4, 6, IPv4, IPv6, whatever]
+ type: str
default: whatever
keepalive:
description:
@@ -151,11 +176,13 @@ options:
- Either C(1) or C(0). Determines whether or not yum keeps the cache of
headers and packages after successful installation.
choices: ['0', '1']
+ type: str
default: '1'
metadata_expire:
description:
- Time (in seconds) after which the metadata will expire.
- Default value is 6 hours.
+ type: str
default: 21600
metadata_expire_filter:
description:
@@ -176,6 +203,7 @@ options:
I(yum check-update).
- Note that this option does not override "yum clean expire-cache".
choices: [never, 'read-only:past', 'read-only:present', 'read-only:future']
+ type: str
default: 'read-only:present'
metalink:
description:
@@ -184,31 +212,37 @@ options:
mirrors for the repomd.xml file to a I(baseurl).
- This, the I(baseurl) or I(mirrorlist) parameters are required if I(state) is set to
C(present).
+ type: str
mirrorlist:
description:
- Specifies a URL to a file containing a list of baseurls.
- This, the I(baseurl) or I(metalink) parameters are required if I(state) is set to
C(present).
+ type: str
mirrorlist_expire:
description:
- Time (in seconds) after which the mirrorlist locally cached will
expire.
- Default value is 6 hours.
+ type: str
default: 21600
name:
description:
- Unique repository ID. This option builds the section name of the repository in the repo file.
- This parameter is only required if I(state) is set to C(present) or
C(absent).
+ type: str
required: true
password:
description:
- Password to use with the username for basic authentication.
+ type: str
priority:
description:
- Enforce ordered protection of repositories. The value is an integer
from 1 to 99.
- This option only works if the YUM Priorities plugin is installed.
+ type: str
default: 99
protect:
description:
@@ -219,12 +253,15 @@ options:
description:
- URL to the proxy server that yum should use. Set to C(_none_) to
disable the global proxy setting.
+ type: str
proxy_password:
description:
- Password for this proxy.
+ type: str
proxy_username:
description:
- Username to use for proxy.
+ type: str
repo_gpgcheck:
description:
- This tells yum whether or not it should perform a GPG signature check
@@ -234,11 +271,13 @@ options:
reposdir:
description:
- Directory where the C(.repo) files will be stored.
+ type: path
default: /etc/yum.repos.d
retries:
description:
- Set the number of times any attempt to retrieve a file should retry
before returning an error. Setting this to C(0) makes yum try forever.
+ type: str
default: 10
s3_enabled:
description:
@@ -267,16 +306,19 @@ options:
description:
- Path to the directory containing the databases of the certificate
authorities yum should use to verify SSL certificates.
+ type: str
aliases: [ ca_cert ]
sslclientcert:
description:
- Path to the SSL client certificate yum should use to connect to
repos/remote sites.
+ type: str
aliases: [ client_cert ]
sslclientkey:
description:
- Path to the SSL client key yum should use to connect to repos/remote
sites.
+ type: str
aliases: [ client_key ]
sslverify:
description:
@@ -288,25 +330,30 @@ options:
description:
- State of the repo file.
choices: [absent, present]
+ type: str
default: present
throttle:
description:
- Enable bandwidth throttling for downloads.
- This option can be expressed as a absolute data rate in bytes/sec. An
SI prefix (k, M or G) may be appended to the bandwidth value.
+ type: str
timeout:
description:
- Number of seconds to wait for a connection before timing out.
+ type: str
default: 30
ui_repoid_vars:
description:
- When a repository id is displayed, append these yum variables to the
string if they are used in the I(baseurl)/etc. Variables are appended
in the order listed (and found).
+ type: str
default: releasever basearch
username:
description:
- Username to use for basic authentication to a repo or really any url.
+ type: str
extends_documentation_fragment:
- files
@@ -409,6 +456,7 @@ class YumRepo(object):
'gpgcakey',
'gpgcheck',
'gpgkey',
+ 'module_hotfixes',
'http_caching',
'include',
'includepkgs',
@@ -541,22 +589,23 @@ def main():
# Module settings
argument_spec = dict(
bandwidth=dict(),
- baseurl=dict(type='list'),
+ baseurl=dict(type='list', elements='str'),
cost=dict(),
deltarpm_metadata_percentage=dict(),
deltarpm_percentage=dict(),
description=dict(),
enabled=dict(type='bool'),
enablegroups=dict(type='bool'),
- exclude=dict(type='list'),
+ exclude=dict(type='list', elements='str'),
failovermethod=dict(choices=['roundrobin', 'priority']),
file=dict(),
- gpgcakey=dict(),
+ gpgcakey=dict(no_log=False),
gpgcheck=dict(type='bool'),
- gpgkey=dict(type='list'),
+ gpgkey=dict(type='list', elements='str', no_log=False),
+ module_hotfixes=dict(type='bool'),
http_caching=dict(choices=['all', 'packages', 'none']),
include=dict(),
- includepkgs=dict(type='list'),
+ includepkgs=dict(type='list', elements='str'),
ip_resolve=dict(choices=['4', '6', 'IPv4', 'IPv6', 'whatever']),
keepalive=dict(type='bool'),
keepcache=dict(choices=['0', '1']),
@@ -586,7 +635,7 @@ def main():
sslcacert=dict(aliases=['ca_cert']),
ssl_check_cert_permissions=dict(type='bool'),
sslclientcert=dict(aliases=['client_cert']),
- sslclientkey=dict(aliases=['client_key']),
+ sslclientkey=dict(aliases=['client_key'], no_log=False),
sslverify=dict(type='bool', aliases=['validate_certs']),
state=dict(choices=['present', 'absent'], default='present'),
throttle=dict(),
@@ -595,7 +644,7 @@ def main():
username=dict(),
)
- argument_spec['async'] = dict(type='bool')
+ argument_spec['async'] = dict(type='bool', default=True)
module = AnsibleModule(
argument_spec=argument_spec,
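The explicit no_log=False markers on gpgcakey, gpgkey and sslclientkey exist because option names containing hints such as "key" are otherwise flagged as potential secrets. A toy version of that heuristic, illustrative only:

    SECRET_HINTS = ('key', 'password', 'secret', 'token')

    def looks_secret(option_name):
        return any(hint in option_name.lower() for hint in SECRET_HINTS)

    for opt in ('gpgkey', 'sslclientkey', 'baseurl'):
        print(opt, looks_secret(opt))  # gpgkey True, sslclientkey True, baseurl False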
diff --git a/lib/ansible/parsing/dataloader.py b/lib/ansible/parsing/dataloader.py
index 4b7bddff..c9ed836d 100644
--- a/lib/ansible/parsing/dataloader.py
+++ b/lib/ansible/parsing/dataloader.py
@@ -198,20 +198,34 @@ class DataLoader:
''' imperfect role detection, roles are still valid w/o tasks|meta/main.yml|yaml|etc '''
b_path = to_bytes(path, errors='surrogate_or_strict')
+ b_path_dirname = os.path.dirname(b_path)
b_upath = to_bytes(unfrackpath(path, follow=False), errors='surrogate_or_strict')
- for b_finddir in (b'meta', b'tasks'):
- for b_suffix in (b'.yml', b'.yaml', b''):
- b_main = b'main%s' % (b_suffix)
- b_tasked = os.path.join(b_finddir, b_main)
-
- if (
- RE_TASKS.search(path) and
- os.path.exists(os.path.join(b_path, b_main)) or
- os.path.exists(os.path.join(b_upath, b_tasked)) or
- os.path.exists(os.path.join(os.path.dirname(b_path), b_tasked))
- ):
- return True
+ untasked_paths = (
+ os.path.join(b_path, b'main.yml'),
+ os.path.join(b_path, b'main.yaml'),
+ os.path.join(b_path, b'main'),
+ )
+ tasked_paths = (
+ os.path.join(b_upath, b'tasks/main.yml'),
+ os.path.join(b_upath, b'tasks/main.yaml'),
+ os.path.join(b_upath, b'tasks/main'),
+ os.path.join(b_upath, b'meta/main.yml'),
+ os.path.join(b_upath, b'meta/main.yaml'),
+ os.path.join(b_upath, b'meta/main'),
+ os.path.join(b_path_dirname, b'tasks/main.yml'),
+ os.path.join(b_path_dirname, b'tasks/main.yaml'),
+ os.path.join(b_path_dirname, b'tasks/main'),
+ os.path.join(b_path_dirname, b'meta/main.yml'),
+ os.path.join(b_path_dirname, b'meta/main.yaml'),
+ os.path.join(b_path_dirname, b'meta/main'),
+ )
+
+ exists_untasked = map(os.path.exists, untasked_paths)
+ exists_tasked = map(os.path.exists, tasked_paths)
+ if RE_TASKS.search(path) and any(exists_untasked) or any(exists_tasked):
+ return True
+
return False
def path_dwim_relative(self, path, dirname, source, is_role=False):
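The refactor above spells every candidate path out; the same tuple could be built programmatically, which also makes the tasks/meta by yml/yaml/bare-main structure explicit. A sketch with illustrative byte paths:

    import os

    def candidate_tasked_paths(b_upath, b_path_dirname):
        return tuple(
            os.path.join(base, sub, name)
            for base in (b_upath, b_path_dirname)
            for sub in (b'tasks', b'meta')
            for name in (b'main.yml', b'main.yaml', b'main')
        )

    print(candidate_tasked_paths(b'/roles/myrole', b'/roles'))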
diff --git a/lib/ansible/parsing/mod_args.py b/lib/ansible/parsing/mod_args.py
index ed9865cb..96e6b857 100644
--- a/lib/ansible/parsing/mod_args.py
+++ b/lib/ansible/parsing/mod_args.py
@@ -132,9 +132,9 @@ class ModuleArgsParser:
tokens = split_args(module_string)
if len(tokens) > 1:
- return (tokens[0], " ".join(tokens[1:]))
+ return (tokens[0].strip(), " ".join(tokens[1:]))
else:
- return (tokens[0], "")
+ return (tokens[0].strip(), "")
def _normalize_parameters(self, thing, action=None, additional_args=None):
'''
diff --git a/lib/ansible/parsing/splitter.py b/lib/ansible/parsing/splitter.py
index b5209b01..ab7df04d 100644
--- a/lib/ansible/parsing/splitter.py
+++ b/lib/ansible/parsing/splitter.py
@@ -87,9 +87,8 @@ def parse_kv(args, check_raw=False):
k = x[:pos]
v = x[pos + 1:]
- # FIXME: make the retrieval of this list of shell/command
- # options a function, so the list is centralized
- if check_raw and k not in ('creates', 'removes', 'chdir', 'executable', 'warn'):
+ # FIXME: make the retrieval of this list of shell/command options a function, so the list is centralized
+ if check_raw and k not in ('creates', 'removes', 'chdir', 'executable', 'warn', 'stdin', 'stdin_add_newline', 'strip_empty_ends'):
raw_params.append(orig_x)
else:
options[k.strip()] = unquote(v.strip())
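Editor's note: the splitter change above only widens the allow-list of k=v options that a free-form command task recognizes; with check_raw=True every other token is folded back into _raw_params so the command text survives intact. Roughly (result shown as a comment, key order not significant):

    from ansible.parsing.splitter import parse_kv

    # 'chdir' is in the allow-list, so it becomes an option; the rest stays raw.
    parse_kv('echo hello chdir=/tmp', check_raw=True)
    # -> {'chdir': '/tmp', '_raw_params': 'echo hello'}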
diff --git a/lib/ansible/parsing/utils/yaml.py b/lib/ansible/parsing/utils/yaml.py
index 8dd0550e..91e37f95 100644
--- a/lib/ansible/parsing/utils/yaml.py
+++ b/lib/ansible/parsing/utils/yaml.py
@@ -13,7 +13,7 @@ from yaml import YAMLError
from ansible.errors import AnsibleParserError
from ansible.errors.yaml_strings import YAML_SYNTAX_ERROR
-from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils._text import to_native
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject
from ansible.parsing.ajson import AnsibleJSONDecoder
@@ -36,10 +36,11 @@ def _handle_error(json_exc, yaml_exc, file_name, show_content):
err_obj = AnsibleBaseYAMLObject()
err_obj.ansible_pos = (file_name, yaml_exc.problem_mark.line + 1, yaml_exc.problem_mark.column + 1)
- err_msg = 'We were unable to read either as JSON nor YAML, these are the errors we got from each:\n' \
- 'JSON: %s\n\n' % to_text(json_exc) + YAML_SYNTAX_ERROR % getattr(yaml_exc, 'problem', '')
+ n_yaml_syntax_error = YAML_SYNTAX_ERROR % to_native(getattr(yaml_exc, 'problem', u''))
+ n_err_msg = 'We were unable to read either as JSON nor YAML, these are the errors we got from each:\n' \
+ 'JSON: %s\n\n%s' % (to_native(json_exc), n_yaml_syntax_error)
- raise AnsibleParserError(to_native(err_msg), obj=err_obj, show_content=show_content, orig_exc=yaml_exc)
+ raise AnsibleParserError(n_err_msg, obj=err_obj, show_content=show_content, orig_exc=yaml_exc)
def _safe_load(stream, file_name=None, vault_secrets=None):
diff --git a/lib/ansible/parsing/vault/__init__.py b/lib/ansible/parsing/vault/__init__.py
index 6cf5dc72..bd99c69d 100644
--- a/lib/ansible/parsing/vault/__init__.py
+++ b/lib/ansible/parsing/vault/__init__.py
@@ -649,7 +649,7 @@ class VaultLib:
vault_id=vault_id)
return b_vaulttext
- def decrypt(self, vaulttext, filename=None):
+ def decrypt(self, vaulttext, filename=None, obj=None):
'''Decrypt a piece of vault encrypted data.
:arg vaulttext: a string to decrypt. Since vault encrypted data is an
@@ -660,10 +660,10 @@ class VaultLib:
:returns: a byte string containing the decrypted data and the vault-id that was used
'''
- plaintext, vault_id, vault_secret = self.decrypt_and_get_vault_id(vaulttext, filename=filename)
+ plaintext, vault_id, vault_secret = self.decrypt_and_get_vault_id(vaulttext, filename=filename, obj=obj)
return plaintext
- def decrypt_and_get_vault_id(self, vaulttext, filename=None):
+ def decrypt_and_get_vault_id(self, vaulttext, filename=None, obj=None):
"""Decrypt a piece of vault encrypted data.
:arg vaulttext: a string to decrypt. Since vault encrypted data is an
@@ -680,7 +680,7 @@ class VaultLib:
raise AnsibleVaultError("A vault password must be specified to decrypt data")
if not is_encrypted(b_vaulttext):
- msg = "input is not vault encrypted data"
+ msg = "input is not vault encrypted data. "
if filename:
msg += "%s is not a vault encrypted file" % to_native(filename)
raise AnsibleError(msg)
@@ -750,11 +750,12 @@ class VaultLib:
)
break
except AnsibleVaultFormatError as exc:
+ exc.obj = obj
msg = u"There was a vault format error"
if filename:
msg += u' in %s' % (to_text(filename))
- msg += u': %s' % exc
- display.warning(msg)
+ msg += u': %s' % to_text(exc)
+ display.warning(msg, formatted=True)
raise
except AnsibleError as e:
display.vvvv(u'Tried to use the vault secret (%s) to decrypt (%s) but it failed. Error: %s' %
@@ -1037,7 +1038,10 @@ class VaultEditor:
try:
if filename == '-':
- data = sys.stdin.read()
+ if PY3:
+ data = sys.stdin.buffer.read()
+ else:
+ data = sys.stdin.read()
else:
with open(filename, "rb") as fh:
data = fh.read()
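Editor's note: the stdin change above matters because vault payloads must be handled as bytes — on Python 3, sys.stdin.read() returns text decoded with the locale encoding, while sys.stdin.buffer exposes the raw byte stream. The same pattern in isolation, as a small helper (the function name is illustrative):

    import sys

    def read_stdin_bytes():
        # Python 3: sys.stdin is a text wrapper; .buffer is the underlying byte stream.
        # Python 2: sys.stdin already yields bytes.
        if hasattr(sys.stdin, 'buffer'):
            return sys.stdin.buffer.read()
        return sys.stdin.read()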
diff --git a/lib/ansible/parsing/yaml/constructor.py b/lib/ansible/parsing/yaml/constructor.py
index 208286e4..4b795787 100644
--- a/lib/ansible/parsing/yaml/constructor.py
+++ b/lib/ansible/parsing/yaml/constructor.py
@@ -24,11 +24,10 @@ from yaml.nodes import MappingNode
from ansible import constants as C
from ansible.module_utils._text import to_bytes, to_native
-from ansible.parsing.yaml.objects import AnsibleMapping, AnsibleSequence, AnsibleUnicode
-from ansible.parsing.yaml.objects import AnsibleVaultEncryptedUnicode
-from ansible.utils.unsafe_proxy import wrap_var
+from ansible.parsing.yaml.objects import AnsibleMapping, AnsibleSequence, AnsibleUnicode, AnsibleVaultEncryptedUnicode
from ansible.parsing.vault import VaultLib
from ansible.utils.display import Display
+from ansible.utils.unsafe_proxy import wrap_var
display = Display()
@@ -112,6 +111,7 @@ class AnsibleConstructor(SafeConstructor):
note=None)
ret = AnsibleVaultEncryptedUnicode(b_ciphertext_data)
ret.vault = vault
+ ret.ansible_pos = self._node_position_info(node)
return ret
def construct_yaml_seq(self, node):
@@ -121,7 +121,16 @@ class AnsibleConstructor(SafeConstructor):
data.ansible_pos = self._node_position_info(node)
def construct_yaml_unsafe(self, node):
- return wrap_var(self.construct_yaml_str(node))
+ try:
+ constructor = getattr(node, 'id', 'object')
+ if constructor is not None:
+ constructor = getattr(self, 'construct_%s' % constructor)
+ except AttributeError:
+ constructor = self.construct_object
+
+ value = constructor(node)
+
+ return wrap_var(value)
def _node_position_info(self, node):
# the line number where the previous token has ended (plus empty lines)
diff --git a/lib/ansible/parsing/yaml/objects.py b/lib/ansible/parsing/yaml/objects.py
index 9c93006d..3da84471 100644
--- a/lib/ansible/parsing/yaml/objects.py
+++ b/lib/ansible/parsing/yaml/objects.py
@@ -117,7 +117,7 @@ class AnsibleVaultEncryptedUnicode(Sequence, AnsibleBaseYAMLObject):
def data(self):
if not self.vault:
return to_text(self._ciphertext)
- return to_text(self.vault.decrypt(self._ciphertext))
+ return to_text(self.vault.decrypt(self._ciphertext, obj=self))
@data.setter
def data(self, value):
diff --git a/lib/ansible/playbook/base.py b/lib/ansible/playbook/base.py
index df045928..5fc05089 100644
--- a/lib/ansible/playbook/base.py
+++ b/lib/ansible/playbook/base.py
@@ -545,6 +545,13 @@ class FieldAttributeBase(with_metaclass(BaseMeta, object)):
else:
setattr(self, attr, value)
+ # from_attrs is only used to create a finalized task
+ # from attrs coming from the Worker/TaskExecutor.
+ # Those attrs are finalized and squashed in the TE,
+ # and controller-side use needs to reflect that.
+ self._finalized = True
+ self._squashed = True
+
def serialize(self):
'''
Serializes the object derived from the base object into
diff --git a/lib/ansible/playbook/block.py b/lib/ansible/playbook/block.py
index e3a4e1c7..3c8ecc76 100644
--- a/lib/ansible/playbook/block.py
+++ b/lib/ansible/playbook/block.py
@@ -38,7 +38,8 @@ class Block(Base, Conditional, CollectionSearch, Taggable):
_rescue = FieldAttribute(isa='list', default=list, inherit=False)
_always = FieldAttribute(isa='list', default=list, inherit=False)
- # other fields
+ # other fields for task compat
+ _notify = FieldAttribute(isa='list')
_delegate_to = FieldAttribute(isa='string')
_delegate_facts = FieldAttribute(isa='bool')
@@ -54,9 +55,6 @@ class Block(Base, Conditional, CollectionSearch, Taggable):
self._use_handlers = use_handlers
self._implicit = implicit
- # end of role flag
- self._eor = False
-
if task_include:
self._parent = task_include
elif parent_block:
@@ -203,7 +201,6 @@ class Block(Base, Conditional, CollectionSearch, Taggable):
new_me = super(Block, self).copy()
new_me._play = self._play
new_me._use_handlers = self._use_handlers
- new_me._eor = self._eor
if self._dep_chain is not None:
new_me._dep_chain = self._dep_chain[:]
@@ -236,7 +233,6 @@ class Block(Base, Conditional, CollectionSearch, Taggable):
data[attr] = getattr(self, attr)
data['dep_chain'] = self.get_dep_chain()
- data['eor'] = self._eor
if self._role is not None:
data['role'] = self._role.serialize()
@@ -263,7 +259,6 @@ class Block(Base, Conditional, CollectionSearch, Taggable):
setattr(self, attr, data.get(attr))
self._dep_chain = data.get('dep_chain', None)
- self._eor = data.get('eor', False)
# if there was a serialized role, unpack it too
role_data = data.get('role')
@@ -375,7 +370,7 @@ class Block(Base, Conditional, CollectionSearch, Taggable):
filtered_block = evaluate_block(task)
if filtered_block.has_tasks():
tmp_list.append(filtered_block)
- elif (task.action in C._ACTION_META or
+ elif ((task.action in C._ACTION_META and task.implicit) or
(task.action in C._ACTION_INCLUDE and task.evaluate_tags([], self._play.skip_tags, all_vars=all_vars)) or
task.evaluate_tags(self._play.only_tags, self._play.skip_tags, all_vars=all_vars)):
tmp_list.append(task)
diff --git a/lib/ansible/playbook/conditional.py b/lib/ansible/playbook/conditional.py
index a969d1a7..08219e43 100644
--- a/lib/ansible/playbook/conditional.py
+++ b/lib/ansible/playbook/conditional.py
@@ -28,7 +28,7 @@ from jinja2.exceptions import UndefinedError
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleUndefinedVariable
from ansible.module_utils.six import text_type
-from ansible.module_utils._text import to_native
+from ansible.module_utils._text import to_native, to_text
from ansible.playbook.attribute import FieldAttribute
from ansible.utils.display import Display
@@ -88,16 +88,30 @@ class Conditional:
if hasattr(self, '_ds'):
ds = getattr(self, '_ds')
+ result = True
try:
for conditional in self.when:
- if not self._check_conditional(conditional, templar, all_vars):
- return False
+
+ # do evaluation
+ if conditional is None or conditional == '':
+ res = True
+ elif isinstance(conditional, bool):
+ res = conditional
+ else:
+ res = self._check_conditional(conditional, templar, all_vars)
+
+ # only update if still true, preserve false
+ if result:
+ result = res
+
+ display.debug("Evaluated conditional (%s): %s" % (conditional, res))
+ if not result:
+ break
+
except Exception as e:
- raise AnsibleError(
- "The conditional check '%s' failed. The error was: %s" % (to_native(conditional), to_native(e)), obj=ds
- )
+ raise AnsibleError("The conditional check '%s' failed. The error was: %s" % (to_native(conditional), to_native(e)), obj=ds)
- return True
+ return result
def _check_conditional(self, conditional, templar, all_vars):
'''
@@ -107,12 +121,6 @@ class Conditional:
'''
original = conditional
- if conditional is None or conditional == '':
- return True
-
- # this allows for direct boolean assignments to conditionals "when: False"
- if isinstance(conditional, bool):
- return conditional
if templar.is_template(conditional):
display.warning('conditional statements should not include jinja2 '
@@ -133,8 +141,10 @@ class Conditional:
disable_lookups = hasattr(conditional, '__UNSAFE__')
conditional = templar.template(conditional, disable_lookups=disable_lookups)
if bare_vars_warning and not isinstance(conditional, bool):
- display.deprecated('evaluating %r as a bare variable, this behaviour will go away and you might need to add |bool'
- ' to the expression in the future. Also see CONDITIONAL_BARE_VARS configuration toggle' % original,
+ display.deprecated('evaluating %r as a bare variable, this behaviour will go away and you might need to add " | bool"'
+ ' (if you would like to evaluate input string from prompt) or " is truthy"'
+ ' (if you would like to apply Python\'s evaluation method) to the expression in the future. '
+ 'Also see CONDITIONAL_BARE_VARS configuration toggle' % original,
version="2.12", collection_name='ansible.builtin')
if not isinstance(conditional, text_type) or conditional == "":
return conditional
@@ -172,12 +182,8 @@ class Conditional:
inside_yield=inside_yield
)
try:
- e = templar.environment.overlay()
- e.filters.update(templar.environment.filters)
- e.tests.update(templar.environment.tests)
-
- res = e._parse(conditional, None, None)
- res = generate(res, e, None, None)
+ res = templar.environment.parse(conditional, None, None)
+ res = generate(res, templar.environment, None, None)
parsed = ast.parse(res, mode='exec')
cnv = CleansingNodeVisitor()
@@ -186,8 +192,15 @@ class Conditional:
raise AnsibleError("Invalid conditional detected: %s" % to_native(e))
# and finally we generate and template the presented string and look at the resulting string
+ # NOTE The spaces around True and False are intentional to short-circuit safe_eval and avoid
+ # its expensive calls.
presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional
- val = templar.template(presented, disable_lookups=disable_lookups).strip()
+ # NOTE Convert the result to text to account for both native and non-native jinja.
+ # NOTE The templated result of `presented` is a string on native jinja as well prior to Python 3.10.
+ # ast.literal_eval on Python 3.10 removes leading whitespaces so " True " becomes bool True
+ # as opposed to Python 3.9 and lower where the same would result in IndentationError and
+ # string " True " would be returned by Templar.
+ val = to_text(templar.template(presented, disable_lookups=disable_lookups)).strip()
if val == "True":
return True
elif val == "False":
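Editor's note: the conditional hunk above keeps the long-standing trick of wrapping the expression in an if/else template and comparing the rendered text; the added to_text() and the NOTE comments cover native Jinja, where the render result is not always a plain string. A standalone sketch of that evaluation trick with plain jinja2, bypassing Ansible's Templar and its safety checks, so purely illustrative:

    from jinja2 import Environment

    def evaluate_conditional(expression, variables):
        # Render "{% if <expr> %} True {% else %} False {% endif %}" and compare
        # the stripped result, mirroring Conditional._check_conditional.
        presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % expression
        val = str(Environment().from_string(presented).render(**variables)).strip()
        if val == "True":
            return True
        if val == "False":
            return False
        raise ValueError("conditional did not evaluate to a boolean: %r" % val)

    # evaluate_conditional("ansible_distribution == 'Fedora'",
    #                      {"ansible_distribution": "Fedora"})  -> True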
diff --git a/lib/ansible/playbook/helpers.py b/lib/ansible/playbook/helpers.py
index 7f049018..a31f9104 100644
--- a/lib/ansible/playbook/helpers.py
+++ b/lib/ansible/playbook/helpers.py
@@ -123,7 +123,7 @@ def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_h
except AnsibleParserError as e:
# if the raised exception was created with obj=ds args, then it includes the detail
# so we don't need to add it and can just re-raise.
- if e._obj:
+ if e.obj:
raise
# But if it wasn't, we can add the yaml object now to get more detail
raise AnsibleParserError(to_native(e), obj=task_ds, orig_exc=e)
diff --git a/lib/ansible/playbook/included_file.py b/lib/ansible/playbook/included_file.py
index 2d209deb..36108c07 100644
--- a/lib/ansible/playbook/included_file.py
+++ b/lib/ansible/playbook/included_file.py
@@ -24,6 +24,7 @@ import os
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_text
+from ansible.playbook.handler import Handler
from ansible.playbook.task_include import TaskInclude
from ansible.playbook.role_include import IncludeRole
from ansible.template import Templar
@@ -114,60 +115,63 @@ class IncludedFile:
if original_task.action in C._ACTION_ALL_INCLUDE_TASKS:
include_file = None
- if original_task:
- if original_task.static:
- continue
-
- if original_task._parent:
- # handle relative includes by walking up the list of parent include
- # tasks and checking the relative result to see if it exists
- parent_include = original_task._parent
- cumulative_path = None
- while parent_include is not None:
- if not isinstance(parent_include, TaskInclude):
- parent_include = parent_include._parent
- continue
- if isinstance(parent_include, IncludeRole):
- parent_include_dir = parent_include._role_path
- else:
+ if original_task.static:
+ continue
+
+ if original_task._parent:
+ # handle relative includes by walking up the list of parent include
+ # tasks and checking the relative result to see if it exists
+ parent_include = original_task._parent
+ cumulative_path = None
+ while parent_include is not None:
+ if not isinstance(parent_include, TaskInclude):
+ parent_include = parent_include._parent
+ continue
+ if isinstance(parent_include, IncludeRole):
+ parent_include_dir = parent_include._role_path
+ else:
+ try:
+ parent_include_dir = os.path.dirname(templar.template(parent_include.args.get('_raw_params')))
+ except AnsibleError as e:
+ parent_include_dir = ''
+ display.warning(
+ 'Templating the path of the parent %s failed. The path to the '
+ 'included file may not be found. '
+ 'The error was: %s.' % (original_task.action, to_text(e))
+ )
+ if cumulative_path is not None and not os.path.isabs(cumulative_path):
+ cumulative_path = os.path.join(parent_include_dir, cumulative_path)
+ else:
+ cumulative_path = parent_include_dir
+ include_target = templar.template(include_result['include'])
+ if original_task._role:
+ new_basedir = os.path.join(original_task._role._role_path, 'tasks', cumulative_path)
+ candidates = [loader.path_dwim_relative(original_task._role._role_path, 'tasks', include_target),
+ loader.path_dwim_relative(new_basedir, 'tasks', include_target)]
+ for include_file in candidates:
try:
- parent_include_dir = os.path.dirname(templar.template(parent_include.args.get('_raw_params')))
- except AnsibleError as e:
- parent_include_dir = ''
- display.warning(
- 'Templating the path of the parent %s failed. The path to the '
- 'included file may not be found. '
- 'The error was: %s.' % (original_task.action, to_text(e))
- )
- if cumulative_path is not None and not os.path.isabs(cumulative_path):
- cumulative_path = os.path.join(parent_include_dir, cumulative_path)
- else:
- cumulative_path = parent_include_dir
- include_target = templar.template(include_result['include'])
- if original_task._role:
- new_basedir = os.path.join(original_task._role._role_path, 'tasks', cumulative_path)
- candidates = [loader.path_dwim_relative(original_task._role._role_path, 'tasks', include_target),
- loader.path_dwim_relative(new_basedir, 'tasks', include_target)]
- for include_file in candidates:
- try:
- # may throw OSError
- os.stat(include_file)
- # or select the task file if it exists
- break
- except OSError:
- pass
- else:
- include_file = loader.path_dwim_relative(loader.get_basedir(), cumulative_path, include_target)
-
- if os.path.exists(include_file):
- break
- else:
- parent_include = parent_include._parent
+ # may throw OSError
+ os.stat(include_file)
+ # or select the task file if it exists
+ break
+ except OSError:
+ pass
+ else:
+ include_file = loader.path_dwim_relative(loader.get_basedir(), cumulative_path, include_target)
+
+ if os.path.exists(include_file):
+ break
+ else:
+ parent_include = parent_include._parent
if include_file is None:
if original_task._role:
include_target = templar.template(include_result['include'])
- include_file = loader.path_dwim_relative(original_task._role._role_path, 'tasks', include_target)
+ include_file = loader.path_dwim_relative(
+ original_task._role._role_path,
+ 'handlers' if isinstance(original_task, Handler) else 'tasks',
+ include_target,
+ is_role=True)
else:
include_file = loader.path_dwim(include_result['include'])
diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py
index 35dfc558..13206875 100644
--- a/lib/ansible/playbook/play.py
+++ b/lib/ansible/playbook/play.py
@@ -272,6 +272,9 @@ class Play(Base, Taggable, CollectionSearch):
loader=self._loader
)
+ for task in flush_block.block:
+ task.implicit = True
+
block_list = []
block_list.extend(self.pre_tasks)
diff --git a/lib/ansible/playbook/play_context.py b/lib/ansible/playbook/play_context.py
index d9bdc2fd..c328a8c0 100644
--- a/lib/ansible/playbook/play_context.py
+++ b/lib/ansible/playbook/play_context.py
@@ -102,15 +102,6 @@ class PlayContext(Base):
# docker FIXME: remove these
_docker_extra_args = FieldAttribute(isa='string')
- # ssh # FIXME: remove these
- _ssh_executable = FieldAttribute(isa='string', default=C.ANSIBLE_SSH_EXECUTABLE)
- _ssh_args = FieldAttribute(isa='string', default=C.ANSIBLE_SSH_ARGS)
- _ssh_common_args = FieldAttribute(isa='string')
- _sftp_extra_args = FieldAttribute(isa='string')
- _scp_extra_args = FieldAttribute(isa='string')
- _ssh_extra_args = FieldAttribute(isa='string')
- _ssh_transfer_method = FieldAttribute(isa='string', default=C.DEFAULT_SSH_TRANSFER_METHOD)
-
# ???
_connection_lockfd = FieldAttribute(isa='int')
@@ -171,7 +162,7 @@ class PlayContext(Base):
if option:
flag = options[option].get('name')
if flag:
- setattr(self, flag, self.connection.get_option(flag))
+ setattr(self, flag, plugin.get_option(flag))
def set_attributes_from_play(self, play):
self.force_handlers = play.force_handlers
@@ -189,10 +180,6 @@ class PlayContext(Base):
# For now, they are likely to be moved to FieldAttribute defaults
self.private_key_file = context.CLIARGS.get('private_key_file') # Else default
self.verbosity = context.CLIARGS.get('verbosity') # Else default
- self.ssh_common_args = context.CLIARGS.get('ssh_common_args') # Else default
- self.ssh_extra_args = context.CLIARGS.get('ssh_extra_args') # Else default
- self.sftp_extra_args = context.CLIARGS.get('sftp_extra_args') # Else default
- self.scp_extra_args = context.CLIARGS.get('scp_extra_args') # Else default
# Not every cli that uses PlayContext has these command line args so have a default
self.start_at_task = context.CLIARGS.get('start_at_task', None) # Else default
@@ -394,7 +381,7 @@ class PlayContext(Base):
if self._attributes['connection'] == 'smart':
conn_type = 'ssh'
# see if SSH can support ControlPersist if not use paramiko
- if not check_for_controlpersist(self.ssh_executable) and paramiko is not None:
+ if not check_for_controlpersist('ssh') and paramiko is not None:
conn_type = "paramiko"
# if someone did `connection: persistent`, default it to using a persistent paramiko connection to avoid problems
diff --git a/lib/ansible/playbook/playbook_include.py b/lib/ansible/playbook/playbook_include.py
index 01c98031..5b966902 100644
--- a/lib/ansible/playbook/playbook_include.py
+++ b/lib/ansible/playbook/playbook_include.py
@@ -23,6 +23,7 @@ import os
import ansible.constants as C
from ansible.errors import AnsibleParserError, AnsibleAssertionError
+from ansible.module_utils._text import to_bytes
from ansible.module_utils.six import iteritems, string_types
from ansible.parsing.splitter import split_args, parse_kv
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping
@@ -30,6 +31,8 @@ from ansible.playbook.attribute import FieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.conditional import Conditional
from ansible.playbook.taggable import Taggable
+from ansible.utils.collection_loader import AnsibleCollectionConfig
+from ansible.utils.collection_loader._collection_finder import _get_collection_name_from_path, _get_collection_playbook_path
from ansible.template import Templar
from ansible.utils.display import Display
@@ -70,10 +73,29 @@ class PlaybookInclude(Base, Conditional, Taggable):
pb = Playbook(loader=loader)
file_name = templar.template(new_obj.import_playbook)
- if not os.path.isabs(file_name):
- file_name = os.path.join(basedir, file_name)
- pb._load_playbook_data(file_name=file_name, variable_manager=variable_manager, vars=self.vars.copy())
+ # check for FQCN
+ resource = _get_collection_playbook_path(file_name)
+ if resource is not None:
+ playbook = resource[1]
+ playbook_collection = resource[2]
+ else:
+ # not FQCN try path
+ playbook = file_name
+ if not os.path.isabs(playbook):
+ playbook = os.path.join(basedir, playbook)
+
+ # might still be collection playbook
+ playbook_collection = _get_collection_name_from_path(playbook)
+
+ if playbook_collection:
+ # it is a collection playbook, set up default collections
+ AnsibleCollectionConfig.default_collection = playbook_collection
+ else:
+ # it is NOT a collection playbook, set up adjacent paths
+ AnsibleCollectionConfig.playbook_paths.append(os.path.dirname(os.path.abspath(to_bytes(playbook, errors='surrogate_or_strict'))))
+
+ pb._load_playbook_data(file_name=playbook, variable_manager=variable_manager, vars=self.vars.copy())
# finally, update each loaded playbook entry with any variables specified
# on the included playbook and/or any tags which may have been set
@@ -91,7 +113,7 @@ class PlaybookInclude(Base, Conditional, Taggable):
entry.vars = temp_vars
entry.tags = list(set(entry.tags).union(new_obj.tags))
if entry._included_path is None:
- entry._included_path = os.path.dirname(file_name)
+ entry._included_path = os.path.dirname(playbook)
# Check to see if we need to forward the conditionals on to the included
# plays. If so, we can take a shortcut here and simply prepend them to
diff --git a/lib/ansible/playbook/role/__init__.py b/lib/ansible/playbook/role/__init__.py
index 30162c3a..25c3b167 100644
--- a/lib/ansible/playbook/role/__init__.py
+++ b/lib/ansible/playbook/role/__init__.py
@@ -21,6 +21,7 @@ __metaclass__ = type
import os
+from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleParserError, AnsibleAssertionError
from ansible.module_utils._text import to_text
from ansible.module_utils.six import iteritems, binary_type, text_type
@@ -34,10 +35,8 @@ from ansible.playbook.role.metadata import RoleMetadata
from ansible.playbook.taggable import Taggable
from ansible.plugins.loader import add_all_plugin_dirs
from ansible.utils.collection_loader import AnsibleCollectionConfig
-from ansible.utils.display import Display
from ansible.utils.vars import combine_vars
-display = Display()
__all__ = ['Role', 'hash_params']
@@ -100,7 +99,7 @@ class Role(Base, Conditional, Taggable, CollectionSearch):
_delegate_to = FieldAttribute(isa='string')
_delegate_facts = FieldAttribute(isa='bool')
- def __init__(self, play=None, from_files=None, from_include=False):
+ def __init__(self, play=None, from_files=None, from_include=False, validate=True):
self._role_name = None
self._role_path = None
self._role_collection = None
@@ -118,6 +117,7 @@ class Role(Base, Conditional, Taggable, CollectionSearch):
self._role_vars = dict()
self._had_task_run = dict()
self._completed = dict()
+ self._should_validate = validate
if from_files is None:
from_files = {}
@@ -137,7 +137,7 @@ class Role(Base, Conditional, Taggable, CollectionSearch):
return self._role_name
@staticmethod
- def load(role_include, play, parent_role=None, from_files=None, from_include=False):
+ def load(role_include, play, parent_role=None, from_files=None, from_include=False, validate=True):
if from_files is None:
from_files = {}
@@ -171,7 +171,7 @@ class Role(Base, Conditional, Taggable, CollectionSearch):
# for the in-flight in role cache as a sentinel that we're already trying to load
# that role?)
# see https://github.com/ansible/ansible/issues/61527
- r = Role(play=play, from_files=from_files, from_include=from_include)
+ r = Role(play=play, from_files=from_files, from_include=from_include, validate=validate)
r._load_role_data(role_include, parent_role=parent_role)
if role_include.get_name() not in play.ROLE_CACHE:
@@ -255,6 +255,11 @@ class Role(Base, Conditional, Taggable, CollectionSearch):
self.collections.append(default_append_collection)
task_data = self._load_role_yaml('tasks', main=self._from_files.get('tasks'))
+
+ if self._should_validate:
+ role_argspecs = self._get_role_argspecs()
+ task_data = self._prepend_validation_task(task_data, role_argspecs)
+
if task_data:
try:
self._task_blocks = load_list_of_blocks(task_data, play=self._play, role=self, loader=self._loader, variable_manager=self._variable_manager)
@@ -271,6 +276,91 @@ class Role(Base, Conditional, Taggable, CollectionSearch):
raise AnsibleParserError("The handlers/main.yml file for role '%s' must contain a list of tasks" % self._role_name,
obj=handler_data, orig_exc=e)
+ def _get_role_argspecs(self):
+ """Get the role argument spec data.
+
+ Role arg specs can be in one of two files in the role meta subdir: argument_specs.yml
+ or main.yml. The former has precedence over the latter. Data is not combined
+ between the files.
+
+ :returns: A dict of all data under the top-level ``argument_specs`` YAML key
+ in the argument spec file. An empty dict is returned if there is no
+ argspec data.
+ """
+ base_argspec_path = os.path.join(self._role_path, 'meta', 'argument_specs')
+
+ for ext in C.YAML_FILENAME_EXTENSIONS:
+ full_path = base_argspec_path + ext
+ if self._loader.path_exists(full_path):
+ # Note: _load_role_yaml() takes care of rebuilding the path.
+ argument_specs = self._load_role_yaml('meta', main='argument_specs')
+ return argument_specs.get('argument_specs', {})
+
+ # We did not find the meta/argument_specs.[yml|yaml] file, so use the spec
+ # dict from the role meta data, if it exists. Ansible 2.11 and later will
+ # have the 'argument_specs' attribute, but earlier versions will not.
+ return getattr(self._metadata, 'argument_specs', {})
+
+ def _prepend_validation_task(self, task_data, argspecs):
+ '''Insert a role validation task if we have a role argument spec.
+
+ This method will prepend a validation task to the front of the role task
+ list to perform argument spec validation before any other tasks, if an arg spec
+ exists for the entry point. Entry point defaults to `main`.
+
+ :param task_data: List of tasks loaded from the role.
+ :param argspecs: The role argument spec data dict.
+
+ :returns: The (possibly modified) task list.
+ '''
+ if argspecs:
+ # Determine the role entry point so we can retrieve the correct argument spec.
+ # This comes from the `tasks_from` value to include_role or import_role.
+ entrypoint = self._from_files.get('tasks', 'main')
+ entrypoint_arg_spec = argspecs.get(entrypoint)
+
+ if entrypoint_arg_spec:
+ validation_task = self._create_validation_task(entrypoint_arg_spec, entrypoint)
+
+ # Prepend our validate_argument_spec action to happen before any tasks provided by the role.
+ # 'any tasks' can and does include 0 or None tasks, in which cases we create a list of tasks and add our
+ # validate_argument_spec task
+ if not task_data:
+ task_data = []
+ task_data.insert(0, validation_task)
+ return task_data
+
+ def _create_validation_task(self, argument_spec, entrypoint_name):
+ '''Create a new task data structure that uses the validate_argument_spec action plugin.
+
+ :param argument_spec: The arg spec definition for a particular role entry point.
+ This will be the entire arg spec for the entry point as read from the input file.
+ :param entrypoint_name: The name of the role entry point associated with the
+ supplied `argument_spec`.
+ '''
+
+ # If the arg spec provides a short description, use it to flesh out the validation task name
+ task_name = "Validating arguments against arg spec '%s'" % entrypoint_name
+ if 'short_description' in argument_spec:
+ task_name = task_name + ' - ' + argument_spec['short_description']
+
+ return {
+ 'action': {
+ 'module': 'ansible.builtin.validate_argument_spec',
+ # Pass only the 'options' portion of the arg spec to the module.
+ 'argument_spec': argument_spec.get('options', {}),
+ 'provided_arguments': self._role_params,
+ 'validate_args_context': {
+ 'type': 'role',
+ 'name': self._role_name,
+ 'argument_spec_name': entrypoint_name,
+ 'path': self._role_path
+ },
+ },
+ 'name': task_name,
+ 'tags': ['always'],
+ }
+
def _load_role_yaml(self, subdir, main=None, allow_dir=False):
'''
Find and load role YAML files and return data found.
@@ -461,6 +551,8 @@ class Role(Base, Conditional, Taggable, CollectionSearch):
with each task, so tasks know by which route they were found, and
can correctly take their parent's tags/conditionals into account.
'''
+ from ansible.playbook.block import Block
+ from ansible.playbook.task import Task
block_list = []
@@ -474,14 +566,29 @@ class Role(Base, Conditional, Taggable, CollectionSearch):
dep_blocks = dep.compile(play=play, dep_chain=new_dep_chain)
block_list.extend(dep_blocks)
- for idx, task_block in enumerate(self._task_blocks):
+ for task_block in self._task_blocks:
new_task_block = task_block.copy()
new_task_block._dep_chain = new_dep_chain
new_task_block._play = play
- if idx == len(self._task_blocks) - 1:
- new_task_block._eor = True
block_list.append(new_task_block)
+ eor_block = Block(play=play)
+ eor_block._loader = self._loader
+ eor_block._role = self
+ eor_block._variable_manager = self._variable_manager
+ eor_block.run_once = False
+
+ eor_task = Task(block=eor_block)
+ eor_task._role = self
+ eor_task.action = 'meta'
+ eor_task.args = {'_raw_params': 'role_complete'}
+ eor_task.implicit = True
+ eor_task.tags = ['always']
+ eor_task.when = True
+
+ eor_block.block = [eor_task]
+ block_list.append(eor_block)
+
return block_list
def serialize(self, include_deps=True):
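Editor's note: the new validation path above reads meta/argument_specs.yml (or, failing that, the argument_specs key of the role metadata) and prepends a validate_argument_spec task for the active entry point. A sketch of the data flow with an invented role named 'myrole' and an invented spec, showing roughly the task dict that _create_validation_task builds:

    # Hypothetical contents of roles/myrole/meta/argument_specs.yml, after YAML load:
    argument_specs = {
        'main': {
            'short_description': 'Configure the thing',
            'options': {
                'port': {'type': 'int', 'required': True},
            },
        },
    }

    # For entry point 'main', the task prepended to the role's task list looks like:
    validation_task = {
        'action': {
            'module': 'ansible.builtin.validate_argument_spec',
            'argument_spec': argument_specs['main']['options'],
            'provided_arguments': {},  # the role params supplied by the caller
            'validate_args_context': {
                'type': 'role',
                'name': 'myrole',
                'argument_spec_name': 'main',
                'path': '/path/to/roles/myrole',
            },
        },
        'name': "Validating arguments against arg spec 'main' - Configure the thing",
        'tags': ['always'],
    }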
diff --git a/lib/ansible/playbook/role/metadata.py b/lib/ansible/playbook/role/metadata.py
index 1c5c5203..eac71ee4 100644
--- a/lib/ansible/playbook/role/metadata.py
+++ b/lib/ansible/playbook/role/metadata.py
@@ -42,6 +42,7 @@ class RoleMetadata(Base, CollectionSearch):
_allow_duplicates = FieldAttribute(isa='bool', default=False)
_dependencies = FieldAttribute(isa='list', default=list)
_galaxy_info = FieldAttribute(isa='GalaxyInfo')
+ _argument_specs = FieldAttribute(isa='dict', default=dict)
def __init__(self, owner=None):
self._owner = owner
diff --git a/lib/ansible/playbook/role/requirement.py b/lib/ansible/playbook/role/requirement.py
index 18cea8ff..59e9cf39 100644
--- a/lib/ansible/playbook/role/requirement.py
+++ b/lib/ansible/playbook/role/requirement.py
@@ -105,9 +105,7 @@ class RoleRequirement(RoleDefinition):
role["src"] = "git+" + role["src"]
if '+' in role["src"]:
- (scm, src) = role["src"].split('+')
- role["scm"] = scm
- role["src"] = src
+ role["scm"], dummy, role["src"] = role["src"].partition('+')
if 'name' not in role:
role["name"] = RoleRequirement.repo_url_to_role_name(role["src"])
diff --git a/lib/ansible/playbook/role_include.py b/lib/ansible/playbook/role_include.py
index 2ae80ca6..59803f27 100644
--- a/lib/ansible/playbook/role_include.py
+++ b/lib/ansible/playbook/role_include.py
@@ -44,7 +44,7 @@ class IncludeRole(TaskInclude):
BASE = ('name', 'role') # directly assigned
FROM_ARGS = ('tasks_from', 'vars_from', 'defaults_from', 'handlers_from') # used to populate from dict in role
- OTHER_ARGS = ('apply', 'public', 'allow_duplicates') # assigned to matching property
+ OTHER_ARGS = ('apply', 'public', 'allow_duplicates', 'rolespec_validate') # assigned to matching property
VALID_ARGS = tuple(frozenset(BASE + FROM_ARGS + OTHER_ARGS)) # all valid args
# =================================================================================
@@ -53,6 +53,7 @@ class IncludeRole(TaskInclude):
# private as this is a 'module options' vs a task property
_allow_duplicates = FieldAttribute(isa='bool', default=True, private=True)
_public = FieldAttribute(isa='bool', default=False, private=True)
+ _rolespec_validate = FieldAttribute(isa='bool', default=True)
def __init__(self, block=None, role=None, task_include=None):
@@ -80,7 +81,7 @@ class IncludeRole(TaskInclude):
# build role
actual_role = Role.load(ri, myplay, parent_role=self._parent_role, from_files=self._from_files,
- from_include=True)
+ from_include=True, validate=self.rolespec_validate)
actual_role._metadata.allow_duplicates = self.allow_duplicates
if self.statically_loaded or self.public:
diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py
index e9f0a0ab..2052b104 100644
--- a/lib/ansible/playbook/task.py
+++ b/lib/ansible/playbook/task.py
@@ -97,6 +97,7 @@ class Task(Base, Conditional, Taggable, CollectionSearch):
self._role = role
self._parent = None
+ self.implicit = False
if task_include:
self._parent = task_include
@@ -220,7 +221,7 @@ class Task(Base, Conditional, Taggable, CollectionSearch):
except AnsibleParserError as e:
# if the raised exception was created with obj=ds args, then it includes the detail
# so we don't need to add it and can just re-raise.
- if e._obj:
+ if e.obj:
raise
# But if it wasn't, we can add the yaml object now to get more detail
raise AnsibleParserError(to_native(e), obj=ds, orig_exc=e)
@@ -411,6 +412,8 @@ class Task(Base, Conditional, Taggable, CollectionSearch):
if self._role:
new_me._role = self._role
+ new_me.implicit = self.implicit
+
return new_me
def serialize(self):
@@ -427,6 +430,8 @@ class Task(Base, Conditional, Taggable, CollectionSearch):
if self._ansible_internal_redirect_list:
data['_ansible_internal_redirect_list'] = self._ansible_internal_redirect_list[:]
+ data['implicit'] = self.implicit
+
return data
def deserialize(self, data):
@@ -457,6 +462,8 @@ class Task(Base, Conditional, Taggable, CollectionSearch):
self._ansible_internal_redirect_list = data.get('_ansible_internal_redirect_list', [])
+ self.implicit = data.get('implicit', False)
+
super(Task, self).deserialize(data)
def set_loader(self, loader):
diff --git a/lib/ansible/playbook/task_include.py b/lib/ansible/playbook/task_include.py
index e2dc92e9..48eee188 100644
--- a/lib/ansible/playbook/task_include.py
+++ b/lib/ansible/playbook/task_include.py
@@ -43,7 +43,7 @@ class TaskInclude(Task):
OTHER_ARGS = frozenset(('apply',)) # assigned to matching property
VALID_ARGS = BASE.union(OTHER_ARGS) # all valid args
VALID_INCLUDE_KEYWORDS = frozenset(('action', 'args', 'collections', 'debugger', 'ignore_errors', 'loop', 'loop_control',
- 'loop_with', 'name', 'no_log', 'register', 'run_once', 'tags', 'vars',
+ 'loop_with', 'name', 'no_log', 'register', 'run_once', 'tags', 'timeout', 'vars',
'when'))
# =================================================================================
diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py
index 4e5e82ad..3c3afa0f 100644
--- a/lib/ansible/plugins/action/__init__.py
+++ b/lib/ansible/plugins/action/__init__.py
@@ -14,11 +14,10 @@ import random
import re
import stat
import tempfile
-import time
from abc import ABCMeta, abstractmethod
from ansible import constants as C
-from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleActionSkip, AnsibleActionFail, AnsiblePluginRemovedError
+from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleActionSkip, AnsibleActionFail, AnsibleAuthenticationFailure
from ansible.executor.module_common import modify_module
from ansible.executor.interpreter_discovery import discover_interpreter, InterpreterDiscoveryRequiredError
from ansible.module_utils.common._collections_compat import Sequence
@@ -32,6 +31,7 @@ from ansible.utils.collection_loader import resource_from_fqcr
from ansible.utils.display import Display
from ansible.utils.unsafe_proxy import wrap_var, AnsibleUnsafeText
from ansible.vars.clean import remove_internal_keys
+from ansible.utils.plugin_docs import get_versioned_doclink
display = Display()
@@ -89,6 +89,7 @@ class ActionBase(with_metaclass(ABCMeta, object)):
* Module parameters. These are stored in self._task.args
"""
+ # does not default to {'changed': False, 'failed': False}, as it breaks async
result = {}
if tmp is not None:
@@ -233,6 +234,7 @@ class ActionBase(with_metaclass(ABCMeta, object)):
module_compression=self._play_context.module_compression,
async_timeout=self._task.async_val,
environment=final_environment,
+ remote_is_local=bool(getattr(self._connection, '_remote_is_local', False)),
**become_kwargs)
break
except InterpreterDiscoveryRequiredError as idre:
@@ -518,74 +520,208 @@ class ActionBase(with_metaclass(ABCMeta, object)):
file with chown which only works in case the remote_user is
privileged or the remote systems allows chown calls by unprivileged
users (e.g. HP-UX)
- * If the chown fails we can set the file to be world readable so that
+ * If the above fails, we next try 'chmod +a' which is a macOS way of
+ setting ACLs on files.
+ * If the above fails, we check if ansible_common_remote_group is set.
+ If it is, we attempt to chgrp the file to its value. This is useful
+ if the remote_user has a group in common with the become_user. As the
+ remote_user, we can chgrp the file to that group and allow the
+ become_user to read it.
+ * If (the chown fails AND ansible_common_remote_group is not set) OR
+ (ansible_common_remote_group is set AND the chgrp (or following chmod)
+ returned non-zero), we can set the file to be world readable so that
the second unprivileged user can read the file.
Since this could allow other users to get access to private
information we only do this if ansible is configured with
- "allow_world_readable_tmpfiles" in the ansible.cfg
+ "allow_world_readable_tmpfiles" in the ansible.cfg. Also note that
+ when ansible_common_remote_group is set this final fallback is very
+ unlikely to ever be triggered, so long as chgrp was successful. But
+ just because the chgrp was successful does not mean Ansible can
+ necessarily access the files (if, for example, the variable was set
+ to a group that remote_user is in, and can chgrp to, but does not have
+ in common with become_user).
"""
if remote_user is None:
remote_user = self._get_remote_user()
+ # Step 1: Are we on windows?
if getattr(self._connection._shell, "_IS_WINDOWS", False):
- # This won't work on Powershell as-is, so we'll just completely skip until
- # we have a need for it, at which point we'll have to do something different.
+ # This won't work on Powershell as-is, so we'll just completely
+ # skip until we have a need for it, at which point we'll have to do
+ # something different.
return remote_paths
- if self._is_become_unprivileged():
- # Unprivileged user that's different than the ssh user. Let's get
- # to work!
-
- # Try to use file system acls to make the files readable for sudo'd
- # user
+ # Step 2: If we're not becoming an unprivileged user, we are roughly
+ # done. Make the files +x if we're asked to, and return.
+ if not self._is_become_unprivileged():
if execute:
- chmod_mode = 'rx'
- setfacl_mode = 'r-x'
- else:
- chmod_mode = 'rX'
- # NOTE: this form fails silently on freebsd. We currently
- # never call _fixup_perms2() with execute=False but if we
- # start to we'll have to fix this.
- setfacl_mode = 'r-X'
+ # Can't depend on the file being transferred with execute permissions.
+ # Only need user perms because no become was used here
+ res = self._remote_chmod(remote_paths, 'u+x')
+ if res['rc'] != 0:
+ raise AnsibleError(
+ 'Failed to set execute bit on remote files '
+ '(rc: {0}, err: {1})'.format(
+ res['rc'],
+ to_native(res['stderr'])))
+ return remote_paths
- res = self._remote_set_user_facl(remote_paths, self.get_become_option('become_user'), setfacl_mode)
- if res['rc'] != 0:
- # File system acls failed; let's try to use chown next
- # Set executable bit first as on some systems an
- # unprivileged user can use chown
- if execute:
- res = self._remote_chmod(remote_paths, 'u+x')
- if res['rc'] != 0:
- raise AnsibleError('Failed to set file mode on remote temporary files (rc: {0}, err: {1})'.format(res['rc'], to_native(res['stderr'])))
-
- res = self._remote_chown(remote_paths, self.get_become_option('become_user'))
- if res['rc'] != 0 and remote_user in self._get_admin_users():
- # chown failed even if remote_user is administrator/root
- raise AnsibleError('Failed to change ownership of the temporary files Ansible needs to create despite connecting as a privileged user. '
- 'Unprivileged become user would be unable to read the file.')
- elif res['rc'] != 0:
- if self.get_shell_option('world_readable_temp', C.ALLOW_WORLD_READABLE_TMPFILES):
- # chown and fs acls failed -- do things this insecure
- # way only if the user opted in in the config file
- display.warning('Using world-readable permissions for temporary files Ansible needs to create when becoming an unprivileged user. '
- 'This may be insecure. For information on securing this, see '
- 'https://docs.ansible.com/ansible/user_guide/become.html#risks-of-becoming-an-unprivileged-user')
- res = self._remote_chmod(remote_paths, 'a+%s' % chmod_mode)
- if res['rc'] != 0:
- raise AnsibleError('Failed to set file mode on remote files (rc: {0}, err: {1})'.format(res['rc'], to_native(res['stderr'])))
- else:
- raise AnsibleError('Failed to set permissions on the temporary files Ansible needs to create when becoming an unprivileged user '
- '(rc: %s, err: %s}). For information on working around this, see '
- 'https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user'
- % (res['rc'], to_native(res['stderr'])))
- elif execute:
- # Can't depend on the file being transferred with execute permissions.
- # Only need user perms because no become was used here
+ # If we're still here, we have an unprivileged user that's different
+ # than the ssh user.
+ become_user = self.get_become_option('become_user')
+
+ # Try to use file system acls to make the files readable for sudo'd
+ # user
+ if execute:
+ chmod_mode = 'rx'
+ setfacl_mode = 'r-x'
+ # Apple patches their "file_cmds" chmod with ACL support
+ chmod_acl_mode = '{0} allow read,execute'.format(become_user)
+ # POSIX-draft ACL specification. Solaris, maybe others.
+ # See chmod(1) on something Solaris-based for syntax details.
+ posix_acl_mode = 'A+user:{0}:rx:allow'.format(become_user)
+ else:
+ chmod_mode = 'rX'
+ # TODO: this form fails silently on freebsd. We currently
+ # never call _fixup_perms2() with execute=False but if we
+ # start to we'll have to fix this.
+ setfacl_mode = 'r-X'
+ # Apple
+ chmod_acl_mode = '{0} allow read'.format(become_user)
+ # POSIX-draft
+ posix_acl_mode = 'A+user:{0}:r:allow'.format(become_user)
+
+ # Step 3a: Are we able to use setfacl to add user ACLs to the file?
+ res = self._remote_set_user_facl(
+ remote_paths,
+ become_user,
+ setfacl_mode)
+
+ if res['rc'] == 0:
+ return remote_paths
+
+ # Step 3b: Set execute if we need to. We do this before anything else
+ # because some of the methods below might work but not let us set +x
+ # as part of them.
+ if execute:
res = self._remote_chmod(remote_paths, 'u+x')
if res['rc'] != 0:
- raise AnsibleError('Failed to set execute bit on remote files (rc: {0}, err: {1})'.format(res['rc'], to_native(res['stderr'])))
+ raise AnsibleError(
+ 'Failed to set file mode on remote temporary files '
+ '(rc: {0}, err: {1})'.format(
+ res['rc'],
+ to_native(res['stderr'])))
+
+ # Step 3c: File system ACLs failed above; try falling back to chown.
+ res = self._remote_chown(remote_paths, become_user)
+ if res['rc'] == 0:
+ return remote_paths
+
+ # Check if we are an admin/root user. If we are and got here, it means
+ # we failed to chown as root and something weird has happened.
+ if remote_user in self._get_admin_users():
+ raise AnsibleError(
+ 'Failed to change ownership of the temporary files Ansible '
+ 'needs to create despite connecting as a privileged user. '
+ 'Unprivileged become user would be unable to read the '
+ 'file.')
+
+ # Step 3d: Try macOS's special chmod + ACL
+ # macOS chmod's +a flag takes its own argument. As a slight hack, we
+ # pass that argument as the first element of remote_paths. So we end
+ # up running `chmod +a [that argument] [file 1] [file 2] ...`
+ try:
+ res = self._remote_chmod([chmod_acl_mode] + list(remote_paths), '+a')
+ except AnsibleAuthenticationFailure as e:
+ # Solaris-based chmod will return 5 when it sees an invalid mode,
+ # and +a is invalid there. Because it returns 5, which is the same
+ # thing sshpass returns on auth failure, our sshpass code will
+ # assume that auth failed. If we don't handle that case here, none
+ # of the other logic below will get run. This is fairly hacky and a
+ # corner case, but probably one that shows up pretty often in
+ # Solaris-based environments (and possibly others).
+ pass
+ else:
+ if res['rc'] == 0:
+ return remote_paths
+
+ # Step 3e: Try Solaris/OpenSolaris/OpenIndiana-sans-setfacl chmod
+ # Similar to macOS above, Solaris 11.4 drops setfacl and takes file ACLs
+ # via chmod instead. OpenSolaris and illumos-based distros allow for
+ # using either setfacl or chmod, and compatibility depends on filesystem.
+ # It should be possible to debug this branch by installing OpenIndiana
+ # (use ZFS) and going unpriv -> unpriv.
+ res = self._remote_chmod(remote_paths, posix_acl_mode)
+ if res['rc'] == 0:
+ return remote_paths
- return remote_paths
+ # we'll need this down here
+ become_link = get_versioned_doclink('user_guide/become.html')
+
+ # Step 3f: Common group
+ # Otherwise, we're a normal user. We failed to chown the paths to the
+ # unprivileged user, but if we have a common group with them, we should
+ # be able to chown it to that.
+ #
+ # Note that we have no way of knowing if this will actually work... just
+ # because chgrp exits successfully does not mean that Ansible will work.
+ # We could check if the become user is in the group, but this would
+ # create an extra round trip.
+ #
+ # Also note that due to the above, this can prevent the
+ # ALLOW_WORLD_READABLE_TMPFILES logic below from ever getting called. We
+ # leave this up to the user to rectify if they have both of these
+ # features enabled.
+ group = self.get_shell_option('common_remote_group')
+ if group is not None:
+ res = self._remote_chgrp(remote_paths, group)
+ if res['rc'] == 0:
+ # warn user that something might go weirdly here.
+ if self.get_shell_option('world_readable_temp'):
+ display.warning(
+ 'Both common_remote_group and '
+ 'allow_world_readable_tmpfiles are set. chgrp was '
+ 'successful, but there is no guarantee that Ansible '
+ 'will be able to read the files after this operation, '
+ 'particularly if common_remote_group was set to a '
+ 'group of which the unprivileged become user is not a '
+ 'member. In this situation, '
+ 'allow_world_readable_tmpfiles is a no-op. See this '
+ 'URL for more details: %s'
+ '#becoming-an-unprivileged-user' % become_link)
+ if execute:
+ group_mode = 'g+rwx'
+ else:
+ group_mode = 'g+rw'
+ res = self._remote_chmod(remote_paths, group_mode)
+ if res['rc'] == 0:
+ return remote_paths
+
+ # Step 4: World-readable temp directory
+ if self.get_shell_option('world_readable_temp'):
+ # chown and fs acls failed -- do things this insecure way only if
+ # the user opted in in the config file
+ display.warning(
+ 'Using world-readable permissions for temporary files Ansible '
+ 'needs to create when becoming an unprivileged user. This may '
+ 'be insecure. For information on securing this, see %s'
+ '#risks-of-becoming-an-unprivileged-user' % become_link)
+ res = self._remote_chmod(remote_paths, 'a+%s' % chmod_mode)
+ if res['rc'] == 0:
+ return remote_paths
+ raise AnsibleError(
+ 'Failed to set file mode on remote files '
+ '(rc: {0}, err: {1})'.format(
+ res['rc'],
+ to_native(res['stderr'])))
+
+ raise AnsibleError(
+ 'Failed to set permissions on the temporary files Ansible needs '
+ 'to create when becoming an unprivileged user '
+ '(rc: %s, err: %s}). For information on working around this, see %s'
+ '#becoming-an-unprivileged-user' % (
+ res['rc'],
+ to_native(res['stderr']), become_link))
def _remote_chmod(self, paths, mode, sudoable=False):
'''
@@ -603,6 +739,14 @@ class ActionBase(with_metaclass(ABCMeta, object)):
res = self._low_level_execute_command(cmd, sudoable=sudoable)
return res
+ def _remote_chgrp(self, paths, group, sudoable=False):
+ '''
+ Issue a remote chgrp command
+ '''
+ cmd = self._connection._shell.chgrp(paths, group)
+ res = self._low_level_execute_command(cmd, sudoable=sudoable)
+ return res
+
def _remote_set_user_facl(self, paths, user, mode, sudoable=False):
'''
Issue a remote call to setfacl
@@ -1029,7 +1173,7 @@ class ActionBase(with_metaclass(ABCMeta, object)):
def _parse_returned_data(self, res):
try:
- filtered_output, warnings = _filter_non_json_lines(res.get('stdout', u''))
+ filtered_output, warnings = _filter_non_json_lines(res.get('stdout', u''), objects_only=True)
for w in warnings:
display.warning(w)
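Editor's note: the rewritten _fixup_perms2 above flattens a nested if/else into a linear chain of fallbacks — setfacl, chown to the become user, macOS 'chmod +a', POSIX-draft ACLs via chmod, chgrp to ansible_common_remote_group, and finally world-readable modes if configuration allows. The control flow reduces to "try each strategy until one succeeds"; a generic, self-contained sketch of that pattern, where the lambdas are placeholders and not Ansible APIs:

    def fixup_perms(paths, strategies, allow_world_readable=False):
        # Each strategy returns True on success; stop at the first one that works.
        for strategy in strategies:
            if strategy(paths):
                return paths
        if allow_world_readable:
            # analogue of the final world-readable fallback, gated by config
            return paths
        raise RuntimeError('no permission strategy succeeded for %r' % (paths,))

    strategies = [
        lambda p: False,  # setfacl user ACL
        lambda p: False,  # chown to become_user
        lambda p: False,  # macOS 'chmod +a'
        lambda p: False,  # POSIX-draft ACL via chmod A+user:...:allow
        lambda p: True,   # chgrp to a common group, then g+rw/g+rwx
    ]
    fixup_perms(['/tmp/ansible-tmp-123'], strategies)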
diff --git a/lib/ansible/plugins/action/command.py b/lib/ansible/plugins/action/command.py
index 53187ec8..f267eb73 100644
--- a/lib/ansible/plugins/action/command.py
+++ b/lib/ansible/plugins/action/command.py
@@ -17,7 +17,7 @@ class ActionModule(ActionBase):
del tmp # tmp no longer has any effect
# Command module has a special config option to turn off the command nanny warnings
- if 'warn' not in self._task.args:
+ if 'warn' not in self._task.args and C.COMMAND_WARNINGS:
self._task.args['warn'] = C.COMMAND_WARNINGS
wrap_async = self._task.async_val and not self._connection.has_native_async
diff --git a/lib/ansible/plugins/action/gather_facts.py b/lib/ansible/plugins/action/gather_facts.py
index eac63e17..f35481d8 100644
--- a/lib/ansible/plugins/action/gather_facts.py
+++ b/lib/ansible/plugins/action/gather_facts.py
@@ -41,7 +41,13 @@ class ActionModule(ActionBase):
mod_args = dict((k, v) for k, v in mod_args.items() if v is not None)
# handle module defaults
- mod_args = get_action_args_with_defaults(fact_module, mod_args, self._task.module_defaults, self._templar, self._task._ansible_internal_redirect_list)
+ redirect_list = self._shared_loader_obj.module_loader.find_plugin_with_context(
+ fact_module, collection_list=self._task.collections
+ ).redirect_list
+
+ mod_args = get_action_args_with_defaults(
+ fact_module, mod_args, self._task.module_defaults, self._templar, redirect_list
+ )
return mod_args
@@ -62,7 +68,9 @@ class ActionModule(ActionBase):
result = super(ActionModule, self).run(tmp, task_vars)
result['ansible_facts'] = {}
- modules = C.config.get_config_value('FACTS_MODULES', variables=task_vars)
+ # copy the value with list() so we don't mutate the config
+ modules = list(C.config.get_config_value('FACTS_MODULES', variables=task_vars))
+
parallel = task_vars.pop('ansible_facts_parallel', self._task.args.pop('parallel', None))
if 'smart' in modules:
connection_map = C.config.get_config_value('CONNECTION_FACTS_MODULES', variables=task_vars)
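Editor's note: the list() copy above exists because the config manager can return a cached list; the 'smart' handling that follows removes and appends entries, and doing that in place would corrupt the shared configuration for later lookups. A tiny illustration of the aliasing being avoided (the cache dict stands in for the real config manager):

    _cache = {'FACTS_MODULES': ['smart']}

    def get_config_value(key):
        return _cache[key]  # hands back the cached list itself

    modules = get_config_value('FACTS_MODULES')
    modules.remove('smart')                            # mutates the cache too
    assert _cache['FACTS_MODULES'] == []

    _cache['FACTS_MODULES'] = ['smart']
    modules = list(get_config_value('FACTS_MODULES'))  # defensive copy
    modules.remove('smart')
    assert _cache['FACTS_MODULES'] == ['smart']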
diff --git a/lib/ansible/plugins/action/package.py b/lib/ansible/plugins/action/package.py
index c1638658..da759ad8 100644
--- a/lib/ansible/plugins/action/package.py
+++ b/lib/ansible/plugins/action/package.py
@@ -19,6 +19,7 @@ __metaclass__ = type
from ansible.errors import AnsibleAction, AnsibleActionFail
from ansible.executor.module_common import get_action_args_with_defaults
+from ansible.module_utils.facts.system.pkg_mgr import PKG_MGRS
from ansible.plugins.action import ActionBase
from ansible.utils.display import Display
@@ -29,8 +30,7 @@ class ActionModule(ActionBase):
TRANSFERS_FILES = False
- BUILTIN_PKG_MGR_MODULES = set(['apk', 'apt', 'dnf', 'homebrew', 'installp', 'macports', 'opkg', 'portage', 'pacman',
- 'pkg5', 'pkgin', 'pkgng', 'sorcery', 'svr4pkg', 'swdepot', 'swupd', 'urpmi', 'xbps', 'yum', 'zypper'])
+ BUILTIN_PKG_MGR_MODULES = set([manager['name'] for manager in PKG_MGRS])
def run(self, tmp=None, task_vars=None):
''' handler for package operations '''
@@ -71,8 +71,9 @@ class ActionModule(ActionBase):
del new_module_args['use']
# get defaults for specific module
+ context = self._shared_loader_obj.module_loader.find_plugin_with_context(module, collection_list=self._task.collections)
new_module_args = get_action_args_with_defaults(
- module, new_module_args, self._task.module_defaults, self._templar, self._task._ansible_internal_redirect_list
+ module, new_module_args, self._task.module_defaults, self._templar, context.redirect_list
)
if module in self.BUILTIN_PKG_MGR_MODULES:
diff --git a/lib/ansible/plugins/action/pause.py b/lib/ansible/plugins/action/pause.py
index 88babe61..2bc7d6f6 100644
--- a/lib/ansible/plugins/action/pause.py
+++ b/lib/ansible/plugins/action/pause.py
@@ -40,22 +40,23 @@ display = Display()
try:
import curses
+ import io
# Nest the try except since curses.error is not available if curses did not import
try:
curses.setupterm()
HAS_CURSES = True
- except (curses.error, TypeError):
+ except (curses.error, TypeError, io.UnsupportedOperation):
HAS_CURSES = False
except ImportError:
HAS_CURSES = False
+MOVE_TO_BOL = b'\r'
+CLEAR_TO_EOL = b'\x1b[K'
if HAS_CURSES:
- MOVE_TO_BOL = curses.tigetstr('cr')
- CLEAR_TO_EOL = curses.tigetstr('el')
-else:
- MOVE_TO_BOL = b'\r'
- CLEAR_TO_EOL = b'\x1b[K'
+ # curses.tigetstr() returns None in some circumstances
+ MOVE_TO_BOL = curses.tigetstr('cr') or MOVE_TO_BOL
+ CLEAR_TO_EOL = curses.tigetstr('el') or CLEAR_TO_EOL
class AnsibleTimeoutExceeded(Exception):
@@ -245,19 +246,20 @@ class ActionModule(ActionBase):
clear_line(stdout)
raise KeyboardInterrupt
- # read key presses and act accordingly
- if key_pressed in (b'\r', b'\n'):
- clear_line(stdout)
- break
- elif key_pressed in backspace:
- # delete a character if backspace is pressed
- result['user_input'] = result['user_input'][:-1]
- clear_line(stdout)
- if echo:
- stdout.write(result['user_input'])
- stdout.flush()
- else:
- result['user_input'] += key_pressed
+ if not seconds:
+ # read key presses and act accordingly
+ if key_pressed in (b'\r', b'\n'):
+ clear_line(stdout)
+ break
+ elif key_pressed in backspace:
+ # delete a character if backspace is pressed
+ result['user_input'] = result['user_input'][:-1]
+ clear_line(stdout)
+ if echo:
+ stdout.write(result['user_input'])
+ stdout.flush()
+ else:
+ result['user_input'] += key_pressed
except KeyboardInterrupt:
signal.alarm(0)
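The pause.py hunk above inverts the curses handling: the plain ASCII control sequences are now the baseline, and the terminfo capabilities only override them when usable. A minimal sketch of that fallback pattern (illustrative, not the plugin code):

    MOVE_TO_BOL = b'\r'
    CLEAR_TO_EOL = b'\x1b[K'
    try:
        import curses
        import io
        try:
            curses.setupterm()
            # tigetstr() can return None, and setupterm() can fail with
            # io.UnsupportedOperation when stdout is not a real terminal
            MOVE_TO_BOL = curses.tigetstr('cr') or MOVE_TO_BOL
            CLEAR_TO_EOL = curses.tigetstr('el') or CLEAR_TO_EOL
        except (curses.error, TypeError, io.UnsupportedOperation):
            pass
    except ImportError:
        pass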
diff --git a/lib/ansible/plugins/action/reboot.py b/lib/ansible/plugins/action/reboot.py
index d898a1ae..64397b12 100644
--- a/lib/ansible/plugins/action/reboot.py
+++ b/lib/ansible/plugins/action/reboot.py
@@ -12,8 +12,7 @@ from datetime import datetime, timedelta
from ansible.errors import AnsibleError, AnsibleConnectionFailure
from ansible.module_utils._text import to_native, to_text
-from ansible.module_utils.common.collections import is_string
-from ansible.module_utils.common.validation import check_type_str
+from ansible.module_utils.common.validation import check_type_list, check_type_str
from ansible.plugins.action import ActionBase
from ansible.utils.display import Display
@@ -32,9 +31,10 @@ class ActionModule(ActionBase):
'msg',
'post_reboot_delay',
'pre_reboot_delay',
- 'test_command',
+ 'reboot_command',
'reboot_timeout',
- 'search_paths'
+ 'search_paths',
+ 'test_command',
))
DEFAULT_REBOOT_TIMEOUT = 600
@@ -114,11 +114,25 @@ class ActionModule(ActionBase):
return value
def get_shutdown_command_args(self, distribution):
- args = self._get_value_from_facts('SHUTDOWN_COMMAND_ARGS', distribution, 'DEFAULT_SHUTDOWN_COMMAND_ARGS')
-        # Convert seconds to minutes. If less that 60, set it to 0.
- delay_min = self.pre_reboot_delay // 60
- reboot_message = self._task.args.get('msg', self.DEFAULT_REBOOT_MESSAGE)
- return args.format(delay_sec=self.pre_reboot_delay, delay_min=delay_min, message=reboot_message)
+ reboot_command = self._task.args.get('reboot_command')
+ if reboot_command is not None:
+ try:
+ reboot_command = check_type_str(reboot_command, allow_conversion=False)
+ except TypeError as e:
+ raise AnsibleError("Invalid value given for 'reboot_command': %s." % to_native(e))
+
+            # return only the args portion of the command; empty string if none were given
+ try:
+ return reboot_command.split(' ', 1)[1]
+ except IndexError:
+ return ''
+ else:
+ args = self._get_value_from_facts('SHUTDOWN_COMMAND_ARGS', distribution, 'DEFAULT_SHUTDOWN_COMMAND_ARGS')
+
+            # Convert seconds to minutes. If less than 60, set it to 0.
+ delay_min = self.pre_reboot_delay // 60
+ reboot_message = self._task.args.get('msg', self.DEFAULT_REBOOT_MESSAGE)
+ return args.format(delay_sec=self.pre_reboot_delay, delay_min=delay_min, message=reboot_message)
def get_distribution(self, task_vars):
# FIXME: only execute the module if we don't already have the facts we need
@@ -142,44 +156,49 @@ class ActionModule(ActionBase):
raise AnsibleError('Failed to get distribution information. Missing "{0}" in output.'.format(ke.args[0]))
def get_shutdown_command(self, task_vars, distribution):
- shutdown_bin = self._get_value_from_facts('SHUTDOWN_COMMANDS', distribution, 'DEFAULT_SHUTDOWN_COMMAND')
- default_search_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin']
- search_paths = self._task.args.get('search_paths', default_search_paths)
+ reboot_command = self._task.args.get('reboot_command')
+ if reboot_command is not None:
+ try:
+ reboot_command = check_type_str(reboot_command, allow_conversion=False)
+ except TypeError as e:
+ raise AnsibleError("Invalid value given for 'reboot_command': %s." % to_native(e))
+ shutdown_bin = reboot_command.split(' ', 1)[0]
+ else:
+ shutdown_bin = self._get_value_from_facts('SHUTDOWN_COMMANDS', distribution, 'DEFAULT_SHUTDOWN_COMMAND')
- # FIXME: switch all this to user arg spec validation methods when they are available
- # Convert bare strings to a list
- if is_string(search_paths):
- search_paths = [search_paths]
+ if shutdown_bin[0] == '/':
+ return shutdown_bin
+ else:
+ default_search_paths = ['/sbin', '/bin', '/usr/sbin', '/usr/bin', '/usr/local/sbin']
+ search_paths = self._task.args.get('search_paths', default_search_paths)
- # Error if we didn't get a list
- err_msg = "'search_paths' must be a string or flat list of strings, got {0}"
- try:
- incorrect_type = any(not is_string(x) for x in search_paths)
- if not isinstance(search_paths, list) or incorrect_type:
- raise TypeError
- except TypeError:
- raise AnsibleError(err_msg.format(search_paths))
-
- display.debug('{action}: running find module looking in {paths} to get path for "{command}"'.format(
- action=self._task.action,
- command=shutdown_bin,
- paths=search_paths))
- find_result = self._execute_module(
- task_vars=task_vars,
- # prevent collection search by calling with ansible.legacy (still allows library/ override of find)
- module_name='ansible.legacy.find',
- module_args={
- 'paths': search_paths,
- 'patterns': [shutdown_bin],
- 'file_type': 'any'
- }
- )
-
- full_path = [x['path'] for x in find_result['files']]
- if not full_path:
- raise AnsibleError('Unable to find command "{0}" in search paths: {1}'.format(shutdown_bin, search_paths))
- self._shutdown_command = full_path[0]
- return self._shutdown_command
+ try:
+ # Convert bare strings to a list
+ search_paths = check_type_list(search_paths)
+ except TypeError:
+ err_msg = "'search_paths' must be a string or flat list of strings, got {0}"
+ raise AnsibleError(err_msg.format(search_paths))
+
+ display.debug('{action}: running find module looking in {paths} to get path for "{command}"'.format(
+ action=self._task.action,
+ command=shutdown_bin,
+ paths=search_paths))
+
+ find_result = self._execute_module(
+ task_vars=task_vars,
+ # prevent collection search by calling with ansible.legacy (still allows library/ override of find)
+ module_name='ansible.legacy.find',
+ module_args={
+ 'paths': search_paths,
+ 'patterns': [shutdown_bin],
+ 'file_type': 'any'
+ }
+ )
+
+ full_path = [x['path'] for x in find_result['files']]
+ if not full_path:
+ raise AnsibleError('Unable to find command "{0}" in search paths: {1}'.format(shutdown_bin, search_paths))
+ return full_path[0]
def deprecated_args(self):
for arg, version in self.DEPRECATED_ARGS.items():
@@ -322,7 +341,7 @@ class ActionModule(ActionBase):
if reboot_result['rc'] != 0:
result['failed'] = True
result['rebooted'] = False
- result['msg'] = "Reboot command failed. Error was {stdout}, {stderr}".format(
+ result['msg'] = "Reboot command failed. Error was: '{stdout}, {stderr}'".format(
stdout=to_native(reboot_result['stdout'].strip()),
stderr=to_native(reboot_result['stderr'].strip()))
return result
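A hedged sketch of how the new reboot_command option is consumed above: the first whitespace-separated token is treated as the binary (resolved via the find module unless it is an absolute path) and the remainder, if any, becomes the argument string. The helper below is hypothetical.

    def split_reboot_command(reboot_command):
        # '/sbin/shutdown -r now' -> ('/sbin/shutdown', '-r now')
        shutdown_bin = reboot_command.split(' ', 1)[0]
        try:
            args = reboot_command.split(' ', 1)[1]
        except IndexError:
            args = ''    # the command was given without arguments
        return shutdown_bin, args

    assert split_reboot_command('/sbin/shutdown -r now') == ('/sbin/shutdown', '-r now')
    assert split_reboot_command('reboot') == ('reboot', '')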
diff --git a/lib/ansible/plugins/action/service.py b/lib/ansible/plugins/action/service.py
index 42d44361..1b5924a1 100644
--- a/lib/ansible/plugins/action/service.py
+++ b/lib/ansible/plugins/action/service.py
@@ -79,8 +79,9 @@ class ActionModule(ActionBase):
self._display.warning('Ignoring "%s" as it is not used in "%s"' % (unused, module))
# get defaults for specific module
+ context = self._shared_loader_obj.module_loader.find_plugin_with_context(module, collection_list=self._task.collections)
new_module_args = get_action_args_with_defaults(
- module, new_module_args, self._task.module_defaults, self._templar, self._task._ansible_internal_redirect_list
+ module, new_module_args, self._task.module_defaults, self._templar, context.redirect_list
)
# collection prefix known internal modules to avoid collisions from collections search, while still allowing library/ overrides
diff --git a/lib/ansible/plugins/action/set_fact.py b/lib/ansible/plugins/action/set_fact.py
index d7fe573c..df1c0a43 100644
--- a/lib/ansible/plugins/action/set_fact.py
+++ b/lib/ansible/plugins/action/set_fact.py
@@ -18,6 +18,7 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
+from ansible.errors import AnsibleActionFail
from ansible.module_utils.six import iteritems, string_types
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.action import ActionBase
@@ -37,8 +38,7 @@ class ActionModule(ActionBase):
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
- facts = dict()
-
+ facts = {}
cacheable = boolean(self._task.args.pop('cacheable', False))
if self._task.args:
@@ -46,16 +46,23 @@ class ActionModule(ActionBase):
k = self._templar.template(k)
if not isidentifier(k):
- result['failed'] = True
- result['msg'] = ("The variable name '%s' is not valid. Variables must start with a letter or underscore character, and contain only "
- "letters, numbers and underscores." % k)
- return result
+ raise AnsibleActionFail("The variable name '%s' is not valid. Variables must start with a letter or underscore character, "
+ "and contain only letters, numbers and underscores." % k)
+            # NOTE: this should really use BOOLEANS from convert_bool, but only in the k=v case;
+            # right now it also converts matching explicit YAML strings when 'jinja2_native' is disabled.
if not C.DEFAULT_JINJA2_NATIVE and isinstance(v, string_types) and v.lower() in ('true', 'false', 'yes', 'no'):
v = boolean(v, strict=False)
facts[k] = v
+ else:
+ raise AnsibleActionFail('No key/value pairs provided, at least one is required for this action to succeed')
+
+ if facts:
+ # just as _facts actions, we don't set changed=true as we are not modifying the actual host
+ result['ansible_facts'] = facts
+ result['_ansible_facts_cacheable'] = cacheable
+ else:
+            # this should not happen, but just in case we get here
+ raise AnsibleActionFail('Unable to create any variables with provided arguments')
- result['changed'] = False
- result['ansible_facts'] = facts
- result['_ansible_facts_cacheable'] = cacheable
return result
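For reference, a small sketch (with a stand-in helper, not the plugin's imports) of the value coercion the set_fact hunk keeps: when jinja2_native is off, bare strings that look like YAML booleans are converted, everything else is stored unchanged.

    def coerce_fact_value(v, jinja2_native=False):
        # stand-in for ansible.module_utils.parsing.convert_bool.boolean(v, strict=False)
        if not jinja2_native and isinstance(v, str) and v.lower() in ('true', 'false', 'yes', 'no'):
            return v.lower() in ('true', 'yes')
        return v

    assert coerce_fact_value('yes') is True
    assert coerce_fact_value('no') is False
    assert coerce_fact_value('yes', jinja2_native=True) == 'yes'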
diff --git a/lib/ansible/plugins/action/template.py b/lib/ansible/plugins/action/template.py
index 645afff2..92949727 100644
--- a/lib/ansible/plugins/action/template.py
+++ b/lib/ansible/plugins/action/template.py
@@ -17,7 +17,7 @@ from ansible.module_utils._text import to_bytes, to_text, to_native
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.module_utils.six import string_types
from ansible.plugins.action import ActionBase
-from ansible.template import generate_ansible_template_vars
+from ansible.template import generate_ansible_template_vars, AnsibleEnvironment
class ActionModule(ActionBase):
@@ -129,14 +129,21 @@ class ActionModule(ActionBase):
# add ansible 'template' vars
temp_vars = task_vars.copy()
- temp_vars.update(generate_ansible_template_vars(source, dest))
-
- with self._templar.set_temporary_context(searchpath=searchpath, newline_sequence=newline_sequence,
- block_start_string=block_start_string, block_end_string=block_end_string,
- variable_start_string=variable_start_string, variable_end_string=variable_end_string,
- trim_blocks=trim_blocks, lstrip_blocks=lstrip_blocks,
- available_variables=temp_vars):
- resultant = self._templar.do_template(template_data, preserve_trailing_newlines=True, escape_backslashes=False)
+ temp_vars.update(generate_ansible_template_vars(self._task.args.get('src', None), source, dest))
+
+ # force templar to use AnsibleEnvironment to prevent issues with native types
+ # https://github.com/ansible/ansible/issues/46169
+ templar = self._templar.copy_with_new_env(environment_class=AnsibleEnvironment,
+ searchpath=searchpath,
+ newline_sequence=newline_sequence,
+ block_start_string=block_start_string,
+ block_end_string=block_end_string,
+ variable_start_string=variable_start_string,
+ variable_end_string=variable_end_string,
+ trim_blocks=trim_blocks,
+ lstrip_blocks=lstrip_blocks,
+ available_variables=temp_vars)
+ resultant = templar.do_template(template_data, preserve_trailing_newlines=True, escape_backslashes=False)
except AnsibleAction:
raise
except Exception as e:
diff --git a/lib/ansible/plugins/action/validate_argument_spec.py b/lib/ansible/plugins/action/validate_argument_spec.py
new file mode 100644
index 00000000..e73729e0
--- /dev/null
+++ b/lib/ansible/plugins/action/validate_argument_spec.py
@@ -0,0 +1,94 @@
+# Copyright 2021 Red Hat
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import AnsibleError
+from ansible.plugins.action import ActionBase
+from ansible.module_utils.six import iteritems, string_types
+from ansible.module_utils.common.arg_spec import ArgumentSpecValidator
+from ansible.module_utils.errors import AnsibleValidationErrorMultiple
+from ansible.utils.vars import combine_vars
+
+
+class ActionModule(ActionBase):
+    ''' Validate an arg spec '''
+
+ TRANSFERS_FILES = False
+
+ def get_args_from_task_vars(self, argument_spec, task_vars):
+ '''
+ Get any arguments that may come from `task_vars`.
+
+ Expand templated variables so we can validate the actual values.
+
+ :param argument_spec: A dict of the argument spec.
+ :param task_vars: A dict of task variables.
+
+ :returns: A dict of values that can be validated against the arg spec.
+ '''
+ args = {}
+
+ for argument_name, argument_attrs in iteritems(argument_spec):
+ if argument_name in task_vars:
+ args[argument_name] = task_vars[argument_name]
+ args = self._templar.template(args)
+ return args
+
+ def run(self, tmp=None, task_vars=None):
+ '''
+ Validate an argument specification against a provided set of data.
+
+ The `validate_argument_spec` module expects to receive the arguments:
+ - argument_spec: A dict whose keys are the valid argument names, and
+ whose values are dicts of the argument attributes (type, etc).
+ - provided_arguments: A dict whose keys are the argument names, and
+ whose values are the argument value.
+
+ :param tmp: Deprecated. Do not use.
+ :param task_vars: A dict of task variables.
+        :return: An action result dict, including an 'argument_errors' key with a
+ list of validation errors found.
+ '''
+ if task_vars is None:
+ task_vars = dict()
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ # This action can be called from anywhere, so pass in some info about what it is
+ # validating args for so the error results make some sense
+ result['validate_args_context'] = self._task.args.get('validate_args_context', {})
+
+ if 'argument_spec' not in self._task.args:
+ raise AnsibleError('"argument_spec" arg is required in args: %s' % self._task.args)
+
+ # Get the task var called argument_spec. This will contain the arg spec
+ # data dict (for the proper entry point for a role).
+ argument_spec_data = self._task.args.get('argument_spec')
+
+ # the values that were passed in and will be checked against argument_spec
+ provided_arguments = self._task.args.get('provided_arguments', {})
+
+ if not isinstance(argument_spec_data, dict):
+ raise AnsibleError('Incorrect type for argument_spec, expected dict and got %s' % type(argument_spec_data))
+
+ if not isinstance(provided_arguments, dict):
+ raise AnsibleError('Incorrect type for provided_arguments, expected dict and got %s' % type(provided_arguments))
+
+ args_from_vars = self.get_args_from_task_vars(argument_spec_data, task_vars)
+ validator = ArgumentSpecValidator(argument_spec_data)
+ validation_result = validator.validate(combine_vars(args_from_vars, provided_arguments))
+
+ if validation_result.error_messages:
+ result['failed'] = True
+ result['msg'] = 'Validation of arguments failed:\n%s' % '\n'.join(validation_result.error_messages)
+ result['argument_spec_data'] = argument_spec_data
+ result['argument_errors'] = validation_result.error_messages
+ return result
+
+ result['changed'] = False
+ result['msg'] = 'The arg spec validation passed'
+
+ return result
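For context, a minimal sketch of the ArgumentSpecValidator API the new action plugin builds on (assuming ansible-core 2.11+); the spec and values are invented for illustration.

    from ansible.module_utils.common.arg_spec import ArgumentSpecValidator

    argument_spec = {'port': {'type': 'int', 'required': True}}
    validator = ArgumentSpecValidator(argument_spec)
    result = validator.validate({'port': 'not-a-number'})

    if result.error_messages:
        print('Validation failed:\n%s' % '\n'.join(result.error_messages))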
diff --git a/lib/ansible/plugins/become/runas.py b/lib/ansible/plugins/become/runas.py
index c8ae881c..1d6e8f9f 100644
--- a/lib/ansible/plugins/become/runas.py
+++ b/lib/ansible/plugins/become/runas.py
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
- become: runas
+ name: runas
short_description: Run As user
description:
- This become plugin allows your remote/login user to execute commands as another user via the Windows runas facility.
diff --git a/lib/ansible/plugins/become/su.py b/lib/ansible/plugins/become/su.py
index e2001655..77e99764 100644
--- a/lib/ansible/plugins/become/su.py
+++ b/lib/ansible/plugins/become/su.py
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
- become: su
+ name: su
short_description: Substitute User
description:
- This become plugin allows your remote/login user to execute commands as another user via the su utility.
@@ -72,6 +72,7 @@ DOCUMENTATION = """
- List of localized strings to match for prompt detection
- If empty we'll use the built in one
default: []
+ type: list
ini:
- section: su_become_plugin
key: localized_prompts
diff --git a/lib/ansible/plugins/become/sudo.py b/lib/ansible/plugins/become/sudo.py
index a7593cce..a4093b85 100644
--- a/lib/ansible/plugins/become/sudo.py
+++ b/lib/ansible/plugins/become/sudo.py
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
- become: sudo
+ name: sudo
short_description: Substitute User DO
description:
- This become plugin allows your remote/login user to execute commands as another user via the sudo utility.
diff --git a/lib/ansible/plugins/cache/__init__.py b/lib/ansible/plugins/cache/__init__.py
index 20df37d8..3613acad 100644
--- a/lib/ansible/plugins/cache/__init__.py
+++ b/lib/ansible/plugins/cache/__init__.py
@@ -369,8 +369,7 @@ class CachePluginAdjudicator(MutableMapping):
self._cache[key] = value
def flush(self):
- for key in self._cache.keys():
- self._plugin.delete(key)
+ self._plugin.flush()
self._cache = {}
def update(self, value):
diff --git a/lib/ansible/plugins/cache/jsonfile.py b/lib/ansible/plugins/cache/jsonfile.py
index 7605dc41..a26828a4 100644
--- a/lib/ansible/plugins/cache/jsonfile.py
+++ b/lib/ansible/plugins/cache/jsonfile.py
@@ -7,7 +7,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
- cache: jsonfile
+ name: jsonfile
short_description: JSON formatted files.
description:
- This cache uses JSON formatted, per host, files saved to the filesystem.
@@ -23,6 +23,7 @@ DOCUMENTATION = '''
ini:
- key: fact_caching_connection
section: defaults
+ type: path
_prefix:
description: User defined prefix to use when creating the JSON files
env:
diff --git a/lib/ansible/plugins/cache/memory.py b/lib/ansible/plugins/cache/memory.py
index 1bccd544..59f97b6e 100644
--- a/lib/ansible/plugins/cache/memory.py
+++ b/lib/ansible/plugins/cache/memory.py
@@ -7,7 +7,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
- cache: memory
+ name: memory
short_description: RAM backed, non persistent
description:
- RAM backed cache that is not persistent.
diff --git a/lib/ansible/plugins/callback/__init__.py b/lib/ansible/plugins/callback/__init__.py
index f7089e39..7a06698c 100644
--- a/lib/ansible/plugins/callback/__init__.py
+++ b/lib/ansible/plugins/callback/__init__.py
@@ -74,6 +74,7 @@ class CallbackBase(AnsiblePlugin):
self._display.vvvv('Loading callback plugin %s of type %s, v%s from %s' % (name, ctype, version, sys.modules[self.__module__].__file__))
self.disabled = False
+ self.wants_implicit_tasks = False
self._plugin_options = {}
if options is not None:
@@ -239,13 +240,6 @@ class CallbackBase(AnsiblePlugin):
item = result.get('_ansible_item_label', result.get('item'))
return item
- def _get_item(self, result):
- ''' here for backwards compat, really should have always been named: _get_item_label'''
- cback = getattr(self, 'NAME', os.path.basename(__file__))
- self._display.deprecated("The %s callback plugin should be updated to use the _get_item_label method instead" % cback,
- version="2.11", collection_name='ansible.builtin')
- return self._get_item_label(result)
-
def _process_items(self, result):
# just remove them as now they get handled by individual callbacks
del result._result['results']
@@ -265,6 +259,11 @@ class CallbackBase(AnsiblePlugin):
for hidme in self._hide_in_debug:
result.pop(hidme, None)
+ def _print_task_path(self, task, color=C.COLOR_DEBUG):
+ path = task.get_path()
+ if path:
+ self._display.display(u"task path: %s" % path, color=color)
+
def set_play_context(self, play_context):
pass
diff --git a/lib/ansible/plugins/callback/default.py b/lib/ansible/plugins/callback/default.py
index 129e7eea..0a0eebfd 100644
--- a/lib/ansible/plugins/callback/default.py
+++ b/lib/ansible/plugins/callback/default.py
@@ -6,7 +6,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
- callback: default
+ name: default
type: stdout
short_description: default Ansible screen output
version_added: historical
@@ -77,7 +77,6 @@ class CallbackModule(CallbackBase):
def v2_runner_on_failed(self, result, ignore_errors=False):
- delegated_vars = result._result.get('_ansible_delegated_vars', None)
self._clean_results(result._result, result._task.action)
if self._last_task_banner != result._task._uuid:
@@ -90,11 +89,15 @@ class CallbackModule(CallbackBase):
self._process_items(result)
else:
- if delegated_vars:
- self._display.display("fatal: [%s -> %s]: FAILED! => %s" % (result._host.get_name(), delegated_vars['ansible_host'],
+ if result._task.delegate_to:
+ if self._display.verbosity < 2 and self.get_option('show_task_path_on_failure'):
+ self._print_task_path(result._task)
+ self._display.display("fatal: [%s -> %s]: FAILED! => %s" % (result._host.get_name(), result._task.delegate_to,
self._dump_results(result._result)),
color=C.COLOR_ERROR, stderr=self.display_failed_stderr)
else:
+ if self._display.verbosity < 2 and self.get_option('show_task_path_on_failure'):
+ self._print_task_path(result._task)
self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result)),
color=C.COLOR_ERROR, stderr=self.display_failed_stderr)
@@ -103,16 +106,16 @@ class CallbackModule(CallbackBase):
def v2_runner_on_ok(self, result):
- delegated_vars = result._result.get('_ansible_delegated_vars', None)
-
if isinstance(result._task, TaskInclude):
+ if self._last_task_banner != result._task._uuid:
+ self._print_task_banner(result._task)
return
elif result._result.get('changed', False):
if self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
- if delegated_vars:
- msg = "changed: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
+ if result._task.delegate_to:
+ msg = "changed: [%s -> %s]" % (result._host.get_name(), result._task.delegate_to)
else:
msg = "changed: [%s]" % result._host.get_name()
color = C.COLOR_CHANGED
@@ -123,8 +126,8 @@ class CallbackModule(CallbackBase):
if self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
- if delegated_vars:
- msg = "ok: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
+ if result._task.delegate_to:
+ msg = "ok: [%s -> %s]" % (result._host.get_name(), result._task.delegate_to)
else:
msg = "ok: [%s]" % result._host.get_name()
color = C.COLOR_OK
@@ -161,9 +164,8 @@ class CallbackModule(CallbackBase):
if self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
- delegated_vars = result._result.get('_ansible_delegated_vars', None)
- if delegated_vars:
- msg = "fatal: [%s -> %s]: UNREACHABLE! => %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result))
+ if result._task.delegate_to:
+ msg = "fatal: [%s -> %s]: UNREACHABLE! => %s" % (result._host.get_name(), result._task.delegate_to, self._dump_results(result._result))
else:
msg = "fatal: [%s]: UNREACHABLE! => %s" % (result._host.get_name(), self._dump_results(result._result))
self._display.display(msg, color=C.COLOR_UNREACHABLE, stderr=self.display_failed_stderr)
@@ -223,10 +225,9 @@ class CallbackModule(CallbackBase):
else:
checkmsg = ""
self._display.banner(u"%s [%s%s]%s" % (prefix, task_name, args, checkmsg))
+
if self._display.verbosity >= 2:
- path = task.get_path()
- if path:
- self._display.display(u"task path: %s" % path, color=C.COLOR_DEBUG)
+ self._print_task_path(task)
self._last_task_banner = task._uuid
@@ -273,7 +274,6 @@ class CallbackModule(CallbackBase):
def v2_runner_item_on_ok(self, result):
- delegated_vars = result._result.get('_ansible_delegated_vars', None)
if isinstance(result._task, TaskInclude):
return
elif result._result.get('changed', False):
@@ -292,8 +292,8 @@ class CallbackModule(CallbackBase):
msg = 'ok'
color = C.COLOR_OK
- if delegated_vars:
- msg += ": [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
+ if result._task.delegate_to:
+ msg += ": [%s -> %s]" % (result._host.get_name(), result._task.delegate_to)
else:
msg += ": [%s]" % result._host.get_name()
@@ -308,18 +308,21 @@ class CallbackModule(CallbackBase):
if self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
- delegated_vars = result._result.get('_ansible_delegated_vars', None)
self._clean_results(result._result, result._task.action)
- self._handle_exception(result._result)
+ self._handle_exception(result._result, use_stderr=self.display_failed_stderr)
msg = "failed: "
- if delegated_vars:
- msg += "[%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
+ if result._task.delegate_to:
+ msg += "[%s -> %s]" % (result._host.get_name(), result._task.delegate_to)
else:
msg += "[%s]" % (result._host.get_name())
self._handle_warnings(result._result)
- self._display.display(msg + " (item=%s) => %s" % (self._get_item_label(result._result), self._dump_results(result._result)), color=C.COLOR_ERROR)
+ self._display.display(
+ msg + " (item=%s) => %s" % (self._get_item_label(result._result), self._dump_results(result._result)),
+ color=C.COLOR_ERROR,
+ stderr=self.display_failed_stderr
+ )
def v2_runner_item_on_skipped(self, result):
if self.display_skipped_hosts:
@@ -421,6 +424,16 @@ class CallbackModule(CallbackBase):
msg += "Result was: %s" % self._dump_results(result._result)
self._display.display(msg, color=C.COLOR_DEBUG)
+ def v2_runner_on_async_poll(self, result):
+ host = result._host.get_name()
+ jid = result._result.get('ansible_job_id')
+ started = result._result.get('started')
+ finished = result._result.get('finished')
+ self._display.display(
+ 'ASYNC POLL on %s: jid=%s started=%s finished=%s' % (host, jid, started, finished),
+ color=C.COLOR_DEBUG
+ )
+
def v2_playbook_on_notify(self, handler, host):
if self._display.verbosity > 1:
self._display.display("NOTIFIED HANDLER %s for %s" % (handler.get_name(), host), color=C.COLOR_VERBOSE, screen_only=True)
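A minimal sketch of a custom callback that consumes the new v2_runner_on_async_poll event added above; the plugin name and message format are made up, and the options/documentation boilerplate is omitted.

    from ansible.plugins.callback import CallbackBase

    class CallbackModule(CallbackBase):
        CALLBACK_VERSION = 2.0
        CALLBACK_TYPE = 'notification'
        CALLBACK_NAME = 'async_progress'       # hypothetical plugin name
        CALLBACK_NEEDS_ENABLED = True

        def v2_runner_on_async_poll(self, result):
            # result._result carries the async bookkeeping fields from the poll
            self._display.display('still polling job %s on %s (finished=%s)' % (
                result._result.get('ansible_job_id'),
                result._host.get_name(),
                result._result.get('finished')))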
diff --git a/lib/ansible/plugins/callback/junit.py b/lib/ansible/plugins/callback/junit.py
index 556724d1..8bd1ed6a 100644
--- a/lib/ansible/plugins/callback/junit.py
+++ b/lib/ansible/plugins/callback/junit.py
@@ -6,7 +6,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
- callback: junit
+ name: junit
type: aggregate
short_description: write playbook output to a JUnit file.
version_added: historical
@@ -151,7 +151,7 @@ class CallbackModule(CallbackBase):
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'aggregate'
CALLBACK_NAME = 'junit'
- CALLBACK_NEEDS_WHITELIST = True
+ CALLBACK_NEEDS_ENABLED = True
def __init__(self):
super(CallbackModule, self).__init__()
diff --git a/lib/ansible/plugins/callback/minimal.py b/lib/ansible/plugins/callback/minimal.py
index 8d3aef5b..6fcb07d0 100644
--- a/lib/ansible/plugins/callback/minimal.py
+++ b/lib/ansible/plugins/callback/minimal.py
@@ -7,7 +7,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
- callback: minimal
+ name: minimal
type: stdout
short_description: minimal Ansible screen output
version_added: historical
diff --git a/lib/ansible/plugins/callback/oneline.py b/lib/ansible/plugins/callback/oneline.py
index 20b5be7f..fd51b27e 100644
--- a/lib/ansible/plugins/callback/oneline.py
+++ b/lib/ansible/plugins/callback/oneline.py
@@ -7,7 +7,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
- callback: oneline
+ name: oneline
type: stdout
short_description: oneline Ansible screen output
version_added: historical
diff --git a/lib/ansible/plugins/callback/tree.py b/lib/ansible/plugins/callback/tree.py
index f86a3cf5..e7682d5f 100644
--- a/lib/ansible/plugins/callback/tree.py
+++ b/lib/ansible/plugins/callback/tree.py
@@ -6,12 +6,23 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
- callback: tree
- callback_type: notification
+ name: tree
+ type: notification
requirements:
- invoked in the command line
short_description: Save host events to files
version_added: "2.0"
+ options:
+ directory:
+ version_added: '2.11'
+ description: directory that will contain the per host JSON files. Also set by the ``--tree`` option when using adhoc.
+ ini:
+ - section: callback_tree
+ key: directory
+ env:
+ - name: ANSIBLE_CALLBACK_TREE_DIR
+ default: "~/.ansible/tree"
+ type: path
description:
- "This callback is used by the Ansible (adhoc) command line option `-t|--tree`"
- This produces a JSON dump of events in a directory, a file for each host, the directory used MUST be passed as a command line option.
@@ -22,7 +33,7 @@ import os
from ansible.constants import TREE_DIR
from ansible.module_utils._text import to_bytes, to_text
from ansible.plugins.callback import CallbackBase
-from ansible.utils.path import makedirs_safe
+from ansible.utils.path import makedirs_safe, unfrackpath
class CallbackModule(CallbackBase):
@@ -33,15 +44,18 @@ class CallbackModule(CallbackBase):
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'aggregate'
CALLBACK_NAME = 'tree'
- CALLBACK_NEEDS_WHITELIST = True
+ CALLBACK_NEEDS_ENABLED = True
- def __init__(self):
- super(CallbackModule, self).__init__()
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+ ''' override to set self.tree '''
- self.tree = TREE_DIR
- if not self.tree:
- self.tree = os.path.expanduser("~/.ansible/tree")
- self._display.warning("The tree callback is defaulting to ~/.ansible/tree, as an invalid directory was provided: %s" % self.tree)
+ super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
+
+ if TREE_DIR:
+            # TREE_DIR comes from the CLI option --tree, only available for adhoc
+ self.tree = unfrackpath(TREE_DIR)
+ else:
+ self.tree = self.get_option('directory')
def write_tree_file(self, hostname, buf):
''' write something into treedir/hostname '''
@@ -49,15 +63,18 @@ class CallbackModule(CallbackBase):
buf = to_bytes(buf)
try:
makedirs_safe(self.tree)
- path = os.path.join(self.tree, hostname)
+ except (OSError, IOError) as e:
+ self._display.warning(u"Unable to access or create the configured directory (%s): %s" % (to_text(self.tree), to_text(e)))
+
+ try:
+ path = to_bytes(os.path.join(self.tree, hostname))
with open(path, 'wb+') as fd:
fd.write(buf)
except (OSError, IOError) as e:
self._display.warning(u"Unable to write to %s's file: %s" % (hostname, to_text(e)))
def result_to_tree(self, result):
- if self.tree:
- self.write_tree_file(result._host.get_name(), self._dump_results(result._result))
+ self.write_tree_file(result._host.get_name(), self._dump_results(result._result))
def v2_runner_on_ok(self, result):
self.result_to_tree(result)
diff --git a/lib/ansible/plugins/connection/local.py b/lib/ansible/plugins/connection/local.py
index 85cb72b0..182e21cd 100644
--- a/lib/ansible/plugins/connection/local.py
+++ b/lib/ansible/plugins/connection/local.py
@@ -6,12 +6,14 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
- connection: local
+ name: local
short_description: execute on controller
description:
- This connection plugin allows ansible to execute tasks on the Ansible 'controller' instead of on a remote host.
author: ansible (@core)
version_added: historical
+ extends_documentation_fragment:
+ - connection_pipelining
notes:
- The remote user is ignored, the user with which the ansible CLI was executed is used instead.
'''
@@ -82,7 +84,7 @@ class Connection(ConnectionBase):
master = None
stdin = subprocess.PIPE
- if sudoable and self.become and self.become.expect_prompt():
+ if sudoable and self.become and self.become.expect_prompt() and not self.get_option('pipelining'):
# Create a pty if sudoable for privilege escalation that needs it.
# Falls back to using a standard pipe if this fails, which may
# cause the command to fail in certain situations where we are escalating
@@ -102,7 +104,7 @@ class Connection(ConnectionBase):
stderr=subprocess.PIPE,
)
- # if we created a master, we can close the other half of the pty now
+ # if we created a master, we can close the other half of the pty now, otherwise master is stdin
if master is not None:
os.close(stdin)
@@ -138,7 +140,10 @@ class Connection(ConnectionBase):
if not self.become.check_success(become_output):
become_pass = self.become.get_option('become_pass', playcontext=self._play_context)
- os.write(master, to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
+ if master is None:
+ p.stdin.write(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
+ else:
+ os.write(master, to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)
diff --git a/lib/ansible/plugins/connection/paramiko_ssh.py b/lib/ansible/plugins/connection/paramiko_ssh.py
index 62861094..45f23b4a 100644
--- a/lib/ansible/plugins/connection/paramiko_ssh.py
+++ b/lib/ansible/plugins/connection/paramiko_ssh.py
@@ -6,7 +6,7 @@ __metaclass__ = type
DOCUMENTATION = """
author: Ansible Core Team
- connection: paramiko
+ name: paramiko
short_description: Run tasks via python ssh (paramiko)
description:
- Use the python ssh implementation (Paramiko) to connect to targets
@@ -241,6 +241,8 @@ class Connection(ConnectionBase):
self.ssh = SSH_CONNECTION_CACHE[cache_key]
else:
self.ssh = SSH_CONNECTION_CACHE[cache_key] = self._connect_uncached()
+
+ self._connected = True
return self
def _set_log_channel(self, name):
diff --git a/lib/ansible/plugins/connection/psrp.py b/lib/ansible/plugins/connection/psrp.py
index f03eb878..ccef59fc 100644
--- a/lib/ansible/plugins/connection/psrp.py
+++ b/lib/ansible/plugins/connection/psrp.py
@@ -6,7 +6,7 @@ __metaclass__ = type
DOCUMENTATION = """
author: Ansible Core Team
-connection: psrp
+name: psrp
short_description: Run tasks over Microsoft PowerShell Remoting Protocol
description:
- Run commands or put/fetch on a target via PSRP (WinRM plugin)
@@ -14,7 +14,9 @@ description:
underlying transport but instead runs in a PowerShell interpreter.
version_added: "2.7"
requirements:
-- pypsrp (Python library)
+- pypsrp>=0.4.0 (Python library)
+extends_documentation_fragment:
+ - connection_pipelining
options:
# transport options
remote_addr:
@@ -351,6 +353,7 @@ class Connection(ConnectionBase):
self.runspace = None
self.host = None
+ self._last_pipeline = False
self._shell_type = 'powershell'
super(Connection, self).__init__(*args, **kwargs)
@@ -404,11 +407,21 @@ class Connection(ConnectionBase):
)
self._connected = True
+ self._last_pipeline = None
return self
def reset(self):
if not self._connected:
+ self.runspace = None
return
+
+        # Try our best to ensure the runspace is closed to free up server-side resources
+ try:
+ self.close()
+ except Exception as e:
+ # There's a good chance the connection was already closed so just log the error and move on
+            display.debug("PSRP reset - failed to close runspace: %s" % to_text(e))
+
display.vvvvv("PSRP: Reset Connection", host=self._psrp_host)
self.runspace = None
self._connect()
@@ -465,6 +478,8 @@ class Connection(ConnectionBase):
rc, stdout, stderr, local_sha1 = self._put_file_new(in_path, out_path)
else:
+ display.deprecated("Older pypsrp library detected, please update to pypsrp>=0.4.0 to use the newer copy "
+ "method over PSRP.", version="2.13", collection_name='ansible.builtin')
rc, stdout, stderr, local_sha1 = self._put_file_old(in_path, out_path)
if rc != 0:
@@ -668,7 +683,7 @@ end {
if offset == 0: # empty file
yield [""]
- rc, stdout, stderr = self._exec_psrp_script(copy_script, read_gen(), arguments=[out_path], force_stop=True)
+ rc, stdout, stderr = self._exec_psrp_script(copy_script, read_gen(), arguments=[out_path])
return rc, stdout, stderr, sha1_hash.hexdigest()
@@ -719,8 +734,7 @@ if ($bytes_read -gt 0) {
# need to run the setup script outside of the local scope so the
# file stream stays active between fetch operations
rc, stdout, stderr = self._exec_psrp_script(setup_script,
- use_local_scope=False,
- force_stop=True)
+ use_local_scope=False)
if rc != 0:
raise AnsibleError("failed to setup file stream for fetch '%s': %s"
% (out_path, to_native(stderr)))
@@ -735,7 +749,7 @@ if ($bytes_read -gt 0) {
while True:
display.vvvvv("PSRP FETCH %s to %s (offset=%d" %
(in_path, out_path, offset), host=self._psrp_host)
- rc, stdout, stderr = self._exec_psrp_script(read_script % offset, force_stop=True)
+ rc, stdout, stderr = self._exec_psrp_script(read_script % offset)
if rc != 0:
raise AnsibleError("failed to transfer file to '%s': %s"
% (out_path, to_native(stderr)))
@@ -746,7 +760,7 @@ if ($bytes_read -gt 0) {
break
offset += len(data)
- rc, stdout, stderr = self._exec_psrp_script("$fs.Close()", force_stop=True)
+ rc, stdout, stderr = self._exec_psrp_script("$fs.Close()")
if rc != 0:
display.warning("failed to close remote file stream of file "
"'%s': %s" % (in_path, to_native(stderr)))
@@ -758,6 +772,7 @@ if ($bytes_read -gt 0) {
self.runspace.close()
self.runspace = None
self._connected = False
+ self._last_pipeline = None
def _build_kwargs(self):
self._psrp_host = self.get_option('remote_addr')
@@ -864,7 +879,15 @@ if ($bytes_read -gt 0) {
option = self.get_option('_extras')['ansible_psrp_%s' % arg]
self._psrp_conn_kwargs[arg] = option
- def _exec_psrp_script(self, script, input_data=None, use_local_scope=True, force_stop=False, arguments=None):
+ def _exec_psrp_script(self, script, input_data=None, use_local_scope=True, arguments=None):
+ # Check if there's a command on the current pipeline that still needs to be closed.
+ if self._last_pipeline:
+ # Current pypsrp versions raise an exception if the current state was not RUNNING. We manually set it so we
+ # can call stop without any issues.
+ self._last_pipeline.state = PSInvocationState.RUNNING
+ self._last_pipeline.stop()
+ self._last_pipeline = None
+
ps = PowerShell(self.runspace)
ps.add_script(script, use_local_scope=use_local_scope)
if arguments:
@@ -875,14 +898,10 @@ if ($bytes_read -gt 0) {
rc, stdout, stderr = self._parse_pipeline_result(ps)
- if force_stop:
- # This is usually not needed because we close the Runspace after our exec and we skip the call to close the
- # pipeline manually to save on some time. Set to True when running multiple exec calls in the same runspace.
-
- # Current pypsrp versions raise an exception if the current state was not RUNNING. We manually set it so we
- # can call stop without any issues.
- ps.state = PSInvocationState.RUNNING
- ps.stop()
+ # We should really call .stop() on all pipelines that are run to decrement the concurrent command counter on
+ # PSSession but that involves another round trip and is done when the runspace is closed. We instead store the
+ # last pipeline which is closed if another command is run on the runspace.
+ self._last_pipeline = ps
return rc, stdout, stderr
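The psrp change above replaces force_stop with a "lazy stop": the previous pipeline is remembered and only stopped right before the next script runs (or when the runspace closes), saving one round trip per exec. A simplified sketch of the pattern, with the pypsrp state fix-up omitted and class/method names illustrative:

    from pypsrp.powershell import PowerShell

    class LazyPipelineRunner:
        def __init__(self, runspace):
            self.runspace = runspace
            self._last_pipeline = None

        def exec_script(self, script):
            if self._last_pipeline:
                self._last_pipeline.stop()     # settle the previous command first
                self._last_pipeline = None
            ps = PowerShell(self.runspace)
            ps.add_script(script)
            ps.invoke()
            self._last_pipeline = ps           # defer its stop() until later
            return ps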
diff --git a/lib/ansible/plugins/connection/ssh.py b/lib/ansible/plugins/connection/ssh.py
index ed44a035..3f812c1c 100644
--- a/lib/ansible/plugins/connection/ssh.py
+++ b/lib/ansible/plugins/connection/ssh.py
@@ -8,7 +8,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
- connection: ssh
+ name: ssh
short_description: connect via ssh client binary
description:
- This connection plugin allows ansible to communicate to the target machines via normal ssh command line.
@@ -16,14 +16,21 @@ DOCUMENTATION = '''
a password manually to decrypt an ssh key when using this connection plugin (which is the default). The
use of ``ssh-agent`` is highly recommended.
author: ansible (@core)
+ extends_documentation_fragment:
+ - connection_pipelining
version_added: historical
+ notes:
+ - Many options default to 'None' here but that only means we don't override the ssh tool's defaults and/or configuration.
+ For example, if you specify the port in this plugin it will override any C(Port) entry in your C(.ssh/config).
options:
host:
description: Hostname/ip to connect to.
- default: inventory_hostname
vars:
+ - name: inventory_hostname
- name: ansible_host
- name: ansible_ssh_host
+ - name: delegated_vars['ansible_host']
+ - name: delegated_vars['ansible_ssh_host']
host_key_checking:
description: Determines if ssh should check host keys
type: boolean
@@ -70,6 +77,8 @@ DOCUMENTATION = '''
vars:
- name: ansible_ssh_args
version_added: '2.7'
+ cli:
+ - name: ssh_args
ssh_common_args:
description: Common extra args for all ssh CLI tools
ini:
@@ -81,6 +90,9 @@ DOCUMENTATION = '''
version_added: '2.7'
vars:
- name: ansible_ssh_common_args
+ cli:
+ - name: ssh_common_args
+ default: ''
ssh_executable:
default: ssh
description:
@@ -128,6 +140,9 @@ DOCUMENTATION = '''
- key: scp_extra_args
section: ssh_connection
version_added: '2.7'
+ cli:
+ - name: scp_extra_args
+ default: ''
sftp_extra_args:
description: Extra exclusive to the ``sftp`` CLI
vars:
@@ -139,8 +154,12 @@ DOCUMENTATION = '''
- key: sftp_extra_args
section: ssh_connection
version_added: '2.7'
+ cli:
+ - name: sftp_extra_args
+ default: ''
ssh_extra_args:
description: Extra exclusive to the 'ssh' CLI
+ default: ''
vars:
- name: ansible_ssh_extra_args
env:
@@ -150,10 +169,11 @@ DOCUMENTATION = '''
- key: ssh_extra_args
section: ssh_connection
version_added: '2.7'
- retries:
- # constant: ANSIBLE_SSH_RETRIES
+ cli:
+ - name: ssh_extra_args
+ reconnection_retries:
description: Number of attempts to connect.
- default: 3
+ default: 0
type: integer
env:
- name: ANSIBLE_SSH_RETRIES
@@ -168,7 +188,6 @@ DOCUMENTATION = '''
port:
description: Remote port to connect to.
type: int
- default: 22
ini:
- section: defaults
key: remote_port
@@ -189,27 +208,21 @@ DOCUMENTATION = '''
vars:
- name: ansible_user
- name: ansible_ssh_user
+ cli:
+ - name: user
pipelining:
- default: ANSIBLE_PIPELINING
- description:
- - Pipelining reduces the number of SSH operations required to execute a module on the remote server,
- by executing many Ansible modules without actual file transfer.
- - This can result in a very significant performance improvement when enabled.
- - However this conflicts with privilege escalation (become).
- For example, when using sudo operations you must first disable 'requiretty' in the sudoers file for the target hosts,
- which is why this feature is disabled by default.
env:
- name: ANSIBLE_PIPELINING
- name: ANSIBLE_SSH_PIPELINING
ini:
- - section: defaults
+ - section: connection
key: pipelining
- section: ssh_connection
key: pipelining
- type: boolean
vars:
- name: ansible_pipelining
- name: ansible_ssh_pipelining
+
private_key_file:
description:
- Path to private key file to use for authentication
@@ -221,11 +234,15 @@ DOCUMENTATION = '''
vars:
- name: ansible_private_key_file
- name: ansible_ssh_private_key_file
+ cli:
+ - name: private_key_file
control_path:
description:
- This is the location to save ssh's ControlPath sockets, it uses ssh's variable substitution.
- - Since 2.3, if null, ansible will generate a unique hash. Use `%(directory)s` to indicate where to use the control dir path setting.
+ - Since 2.3, if null (default), ansible will generate a unique hash. Use `%(directory)s` to indicate where to use the control dir path setting.
+ - Before 2.3 it defaulted to `control_path=%(directory)s/ansible-ssh-%%h-%%p-%%r`.
+ - Be aware that this setting is ignored if `-o ControlPath` is set in ssh args.
env:
- name: ANSIBLE_SSH_CONTROL_PATH
ini:
@@ -257,6 +274,15 @@ DOCUMENTATION = '''
vars:
- name: ansible_sftp_batch_mode
version_added: '2.7'
+ ssh_transfer_method:
+ description:
+ - "Preferred method to use when transferring files over ssh"
+ - Setting to 'smart' (default) will try them in order, until one succeeds or they all fail
+ - Using 'piped' creates an ssh pipe with ``dd`` on either side to copy the data
+ choices: ['sftp', 'scp', 'piped', 'smart']
+ env: [{name: ANSIBLE_SSH_TRANSFER_METHOD}]
+ ini:
+ - {key: transfer_method, section: ssh_connection}
scp_if_ssh:
default: smart
description:
@@ -280,6 +306,27 @@ DOCUMENTATION = '''
vars:
- name: ansible_ssh_use_tty
version_added: '2.7'
+ timeout:
+ default: 10
+ description:
+        - This is the default amount of time we will wait while establishing an ssh connection
+        - It also controls how long we can wait before reading from the connection once it is established (select on the socket)
+ env:
+ - name: ANSIBLE_TIMEOUT
+ - name: ANSIBLE_SSH_TIMEOUT
+ version_added: '2.11'
+ ini:
+ - key: timeout
+ section: defaults
+ - key: timeout
+ section: ssh_connection
+ version_added: '2.11'
+ vars:
+ - name: ansible_ssh_timeout
+ version_added: '2.11'
+ cli:
+ - name: timeout
+ type: integer
'''
import errno
@@ -395,7 +442,7 @@ def _ssh_retry(func):
"""
@wraps(func)
def wrapped(self, *args, **kwargs):
- remaining_tries = int(C.ANSIBLE_SSH_RETRIES) + 1
+ remaining_tries = int(self.get_option('reconnection_retries')) + 1
cmd_summary = u"%s..." % to_text(args[0])
conn_password = self.get_option('password') or self._play_context.password
for attempt in range(remaining_tries):
@@ -408,6 +455,7 @@ def _ssh_retry(func):
try:
try:
return_tuple = func(self, *args, **kwargs)
+ # TODO: this should come from task
if self._play_context.no_log:
display.vvv(u'rc=%s, stdout and stderr censored due to no log' % return_tuple[0], host=self.host)
else:
@@ -468,11 +516,12 @@ class Connection(ConnectionBase):
def __init__(self, *args, **kwargs):
super(Connection, self).__init__(*args, **kwargs)
+ # TODO: all should come from get_option(), but not might be set at this point yet
self.host = self._play_context.remote_addr
self.port = self._play_context.port
self.user = self._play_context.remote_user
- self.control_path = C.ANSIBLE_SSH_CONTROL_PATH
- self.control_path_dir = C.ANSIBLE_SSH_CONTROL_PATH_DIR
+ self.control_path = None
+ self.control_path_dir = None
# Windows operates differently from a POSIX connection/shell plugin,
# we need to set various properties to ensure SSH on Windows continues
@@ -558,10 +607,15 @@ class Connection(ConnectionBase):
display.vvvvv(u'SSH: %s: (%s)' % (explanation, ')('.join(to_text(a) for a in b_args)), host=self._play_context.remote_addr)
b_command += b_args
- def _build_command(self, binary, *other_args):
+ def _build_command(self, binary, subsystem, *other_args):
'''
- Takes a binary (ssh, scp, sftp) and optional extra arguments and returns
- a command line as an array that can be passed to subprocess.Popen.
+        Takes an executable (ssh, scp, sftp or a wrapper) and optional extra arguments and returns the remote command
+ wrapped in local ssh shell commands and ready for execution.
+
+        :arg binary: actual executable to use to run the command.
+        :arg subsystem: type of executable provided, ssh/sftp/scp, needed because wrappers for ssh might have different names.
+        :arg other_args: additional arguments passed through to the ssh binary
+
'''
b_command = []
@@ -585,10 +639,7 @@ class Connection(ConnectionBase):
if password_prompt:
b_command += [b'-P', to_bytes(password_prompt, errors='surrogate_or_strict')]
- if binary == 'ssh':
- b_command += [to_bytes(self._play_context.ssh_executable, errors='surrogate_or_strict')]
- else:
- b_command += [to_bytes(binary, errors='surrogate_or_strict')]
+ b_command += [to_bytes(binary, errors='surrogate_or_strict')]
#
# Next, additional arguments based on the configuration.
@@ -598,7 +649,7 @@ class Connection(ConnectionBase):
# be disabled if the client side doesn't support the option. However,
# sftp batch mode does not prompt for passwords so it must be disabled
# if not using controlpersist and using sshpass
- if binary == 'sftp' and C.DEFAULT_SFTP_BATCH_MODE:
+ if subsystem == 'sftp' and self.get_option('sftp_batch_mode'):
if conn_password:
b_args = [b'-o', b'BatchMode=no']
self._add_args(b_command, b_args, u'disable batch mode for sshpass')
@@ -607,29 +658,24 @@ class Connection(ConnectionBase):
if self._play_context.verbosity > 3:
b_command.append(b'-vvv')
- #
- # Next, we add [ssh_connection]ssh_args from ansible.cfg.
- #
-
+ # Next, we add ssh_args
ssh_args = self.get_option('ssh_args')
if ssh_args:
b_args = [to_bytes(a, errors='surrogate_or_strict') for a in
self._split_ssh_args(ssh_args)]
self._add_args(b_command, b_args, u"ansible.cfg set ssh_args")
- # Now we add various arguments controlled by configuration file settings
- # (e.g. host_key_checking) or inventory variables (ansible_ssh_port) or
- # a combination thereof.
-
- if not C.HOST_KEY_CHECKING:
+ # Now we add various arguments that have their own specific settings defined in docs above.
+ if not self.get_option('host_key_checking'):
b_args = (b"-o", b"StrictHostKeyChecking=no")
self._add_args(b_command, b_args, u"ANSIBLE_HOST_KEY_CHECKING/host_key_checking disabled")
- if self._play_context.port is not None:
- b_args = (b"-o", b"Port=" + to_bytes(self._play_context.port, nonstring='simplerepr', errors='surrogate_or_strict'))
+ self.port = self.get_option('port')
+ if self.port is not None:
+ b_args = (b"-o", b"Port=" + to_bytes(self.port, nonstring='simplerepr', errors='surrogate_or_strict'))
self._add_args(b_command, b_args, u"ANSIBLE_REMOTE_PORT/remote_port/ansible_port set")
- key = self._play_context.private_key_file
+ key = self.get_option('private_key_file')
if key:
b_args = (b"-o", b'IdentityFile="' + to_bytes(os.path.expanduser(key), errors='surrogate_or_strict') + b'"')
self._add_args(b_command, b_args, u"ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set")
@@ -644,28 +690,29 @@ class Connection(ConnectionBase):
u"ansible_password/ansible_ssh_password not set"
)
- user = self._play_context.remote_user
- if user:
+ self.user = self.get_option('remote_user')
+ if self.user:
self._add_args(
b_command,
- (b"-o", b'User="%s"' % to_bytes(self._play_context.remote_user, errors='surrogate_or_strict')),
+ (b"-o", b'User="%s"' % to_bytes(self.user, errors='surrogate_or_strict')),
u"ANSIBLE_REMOTE_USER/remote_user/ansible_user/user/-u set"
)
+ timeout = self.get_option('timeout')
self._add_args(
b_command,
- (b"-o", b"ConnectTimeout=" + to_bytes(self._play_context.timeout, errors='surrogate_or_strict', nonstring='simplerepr')),
+ (b"-o", b"ConnectTimeout=" + to_bytes(timeout, errors='surrogate_or_strict', nonstring='simplerepr')),
u"ANSIBLE_TIMEOUT/timeout set"
)
# Add in any common or binary-specific arguments from the PlayContext
# (i.e. inventory or task settings or overrides on the command line).
- for opt in (u'ssh_common_args', u'{0}_extra_args'.format(binary)):
- attr = getattr(self._play_context, opt, None)
+ for opt in (u'ssh_common_args', u'{0}_extra_args'.format(subsystem)):
+ attr = self.get_option(opt)
if attr is not None:
b_args = [to_bytes(a, errors='surrogate_or_strict') for a in self._split_ssh_args(attr)]
- self._add_args(b_command, b_args, u"PlayContext set %s" % opt)
+ self._add_args(b_command, b_args, u"Set %s" % opt)
# Check if ControlPersist is enabled and add a ControlPath if one hasn't
# already been set.
@@ -676,6 +723,7 @@ class Connection(ConnectionBase):
self._persistent = True
if not controlpath:
+ self.control_path_dir = self.get_option('control_path_dir')
cpdir = unfrackpath(self.control_path_dir)
b_cpdir = to_bytes(cpdir, errors='surrogate_or_strict')
@@ -684,6 +732,7 @@ class Connection(ConnectionBase):
if not os.access(b_cpdir, os.W_OK):
raise AnsibleError("Cannot write to ControlPath %s" % to_native(cpdir))
+ self.control_path = self.get_option('control_path')
if not self.control_path:
self.control_path = self._create_control_path(
self.host,
@@ -891,13 +940,12 @@ class Connection(ConnectionBase):
# select timeout should be longer than the connect timeout, otherwise
# they will race each other when we can't connect, and the connect
# timeout usually fails
- timeout = 2 + self._play_context.timeout
+ timeout = 2 + self.get_option('timeout')
for fd in (p.stdout, p.stderr):
fcntl.fcntl(fd, fcntl.F_SETFL, fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)
# TODO: bcoca would like to use SelectSelector() when open
- # filehandles is low, then switch to more efficient ones when higher.
- # select is faster when filehandles is low.
+ # select is faster when filehandles is low and we only ever handle 1.
selector = selectors.DefaultSelector()
selector.register(p.stdout, selectors.EVENT_READ)
selector.register(p.stderr, selectors.EVENT_READ)
@@ -1052,7 +1100,7 @@ class Connection(ConnectionBase):
p.stdout.close()
p.stderr.close()
- if C.HOST_KEY_CHECKING:
+ if self.get_option('host_key_checking'):
if cmd[0] == b"sshpass" and p.returncode == 6:
raise AnsibleError('Using a SSH password instead of a key is not possible because Host Key checking is enabled and sshpass does not support '
'this. Please add this host\'s fingerprint to your known_hosts file to manage this host.')
@@ -1099,17 +1147,18 @@ class Connection(ConnectionBase):
methods = []
# Use the transfer_method option if set, otherwise use scp_if_ssh
- ssh_transfer_method = self._play_context.ssh_transfer_method
+ ssh_transfer_method = self.get_option('ssh_transfer_method')
+ scp_if_ssh = self.get_option('scp_if_ssh')
+ if ssh_transfer_method is None and scp_if_ssh == 'smart':
+ ssh_transfer_method = 'smart'
+
if ssh_transfer_method is not None:
- if not (ssh_transfer_method in ('smart', 'sftp', 'scp', 'piped')):
- raise AnsibleOptionsError('transfer_method needs to be one of [smart|sftp|scp|piped]')
if ssh_transfer_method == 'smart':
methods = smart_methods
else:
methods = [ssh_transfer_method]
else:
# since this can be a non-bool now, we need to handle it correctly
- scp_if_ssh = C.DEFAULT_SCP_IF_SSH
if not isinstance(scp_if_ssh, bool):
scp_if_ssh = scp_if_ssh.lower()
if scp_if_ssh in BOOLEANS:
@@ -1126,7 +1175,7 @@ class Connection(ConnectionBase):
for method in methods:
returncode = stdout = stderr = None
if method == 'sftp':
- cmd = self._build_command(self.get_option('sftp_executable'), to_bytes(host))
+ cmd = self._build_command(self.get_option('sftp_executable'), 'sftp', to_bytes(host))
in_data = u"{0} {1} {2}\n".format(sftp_action, shlex_quote(in_path), shlex_quote(out_path))
in_data = to_bytes(in_data, nonstring='passthru')
(returncode, stdout, stderr) = self._bare_run(cmd, in_data, checkrc=False)
@@ -1134,9 +1183,9 @@ class Connection(ConnectionBase):
scp = self.get_option('scp_executable')
if sftp_action == 'get':
- cmd = self._build_command(scp, u'{0}:{1}'.format(host, self._shell.quote(in_path)), out_path)
+ cmd = self._build_command(scp, 'scp', u'{0}:{1}'.format(host, self._shell.quote(in_path)), out_path)
else:
- cmd = self._build_command(scp, in_path, u'{0}:{1}'.format(host, self._shell.quote(out_path)))
+ cmd = self._build_command(scp, 'scp', in_path, u'{0}:{1}'.format(host, self._shell.quote(out_path)))
in_data = None
(returncode, stdout, stderr) = self._bare_run(cmd, in_data, checkrc=False)
elif method == 'piped':
@@ -1189,7 +1238,7 @@ class Connection(ConnectionBase):
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
- display.vvv(u"ESTABLISH SSH CONNECTION FOR USER: {0}".format(self._play_context.remote_user), host=self._play_context.remote_addr)
+ display.vvv(u"ESTABLISH SSH CONNECTION FOR USER: {0}".format(self.user), host=self._play_context.remote_addr)
if getattr(self._shell, "_IS_WINDOWS", False):
# Become method 'runas' is done in the wrapper that is executed,
@@ -1208,18 +1257,18 @@ class Connection(ConnectionBase):
# python interactive-mode but the modules are not compatible with the
# interactive-mode ("unexpected indent" mainly because of empty lines)
- ssh_executable = self._play_context.ssh_executable
+ ssh_executable = self.get_option('ssh_executable')
# -tt can cause various issues in some environments so allow the user
# to disable it as a troubleshooting method.
use_tty = self.get_option('use_tty')
if not in_data and sudoable and use_tty:
- args = (ssh_executable, '-tt', self.host, cmd)
+ args = ('-tt', self.host, cmd)
else:
- args = (ssh_executable, self.host, cmd)
+ args = (self.host, cmd)
- cmd = self._build_command(*args)
+ cmd = self._build_command(ssh_executable, 'ssh', *args)
(returncode, stdout, stderr) = self._run(cmd, in_data, sudoable=sudoable)
# When running on Windows, stderr may contain CLIXML encoded output
@@ -1256,23 +1305,25 @@ class Connection(ConnectionBase):
return self._file_transport_command(in_path, out_path, 'get')
def reset(self):
- # If we have a persistent ssh connection (ControlPersist), we can ask it to stop listening.
- cmd = self._build_command(self._play_context.ssh_executable, '-O', 'stop', self.host)
- controlpersist, controlpath = self._persistence_controls(cmd)
- cp_arg = [a for a in cmd if a.startswith(b"ControlPath=")]
- # only run the reset if the ControlPath already exists or if it isn't
- # configured and ControlPersist is set
run_reset = False
- if controlpersist and len(cp_arg) > 0:
- cp_path = cp_arg[0].split(b"=", 1)[-1]
- if os.path.exists(cp_path):
- run_reset = True
- elif controlpersist:
+
+ # If we have a persistent ssh connection (ControlPersist), we can ask it to stop listening.
+ # only run the reset if the ControlPath already exists or if it isn't configured and ControlPersist is set
+ # 'check' will determine this.
+ cmd = self._build_command(self.get_option('ssh_executable'), 'ssh', '-O', 'check', self.host)
+ display.vvv(u'sending connection check: %s' % to_text(cmd))
+ p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdout, stderr = p.communicate()
+ status_code = p.wait()
+ if status_code != 0:
+ display.vvv(u"No connection to reset: %s" % to_text(stderr))
+ else:
run_reset = True
if run_reset:
- display.vvv(u'sending stop: %s' % to_text(cmd))
+ cmd = self._build_command(self.get_option('ssh_executable'), 'ssh', '-O', 'stop', self.host)
+ display.vvv(u'sending connection stop: %s' % to_text(cmd))
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
status_code = p.wait()
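The rewritten reset() probes the master connection with `ssh -O check` and only issues `-O stop` when the probe succeeds, instead of inspecting ControlPath arguments. A rough Python 3 illustration of that check-then-stop pattern (the real code builds the command via _build_command and uses Popen; the host and executable here are placeholders):

    import subprocess

    def reset_persistent_ssh(host, ssh_executable='ssh'):
        # probe the ControlPersist master first; only ask it to stop if it is alive
        check = subprocess.run([ssh_executable, '-O', 'check', host],
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        if check.returncode != 0:
            print('No connection to reset: %s' % check.stderr.decode(errors='replace'))
            return
        subprocess.run([ssh_executable, '-O', 'stop', host],
                       stdout=subprocess.PIPE, stderr=subprocess.PIPE)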
diff --git a/lib/ansible/plugins/connection/winrm.py b/lib/ansible/plugins/connection/winrm.py
index cd464574..82bada26 100644
--- a/lib/ansible/plugins/connection/winrm.py
+++ b/lib/ansible/plugins/connection/winrm.py
@@ -7,13 +7,15 @@ __metaclass__ = type
DOCUMENTATION = """
author: Ansible Core Team
- connection: winrm
+ name: winrm
short_description: Run tasks over Microsoft's WinRM
description:
- Run commands or put/fetch on a target via WinRM
- This plugin allows extra arguments to be passed that are supported by the protocol but not explicitly defined here.
They should take the form of variables declared with the following pattern `ansible_winrm_<option>`.
version_added: "2.0"
+ extends_documentation_fragment:
+ - connection_pipelining
requirements:
- pywinrm (python library)
options:
@@ -80,6 +82,16 @@ DOCUMENTATION = """
vars:
- name: ansible_winrm_kinit_cmd
type: str
+ kinit_args:
+ description:
+ - Extra arguments to pass to C(kinit) when getting the Kerberos authentication ticket.
+ - By default no extra arguments are passed into C(kinit) unless I(ansible_winrm_kerberos_delegation) is also
+ set. In that case C(-f) is added to the C(kinit) args so a forwardable ticket is retrieved.
+ - If set, the args will overwrite any existing defaults for C(kinit), including C(-f) for a delegated ticket.
+ type: str
+ vars:
+ - name: ansible_winrm_kinit_args
+ version_added: '2.11'
kerberos_mode:
description:
- kerberos usage mode.
@@ -114,6 +126,7 @@ import re
import traceback
import json
import tempfile
+import shlex
import subprocess
HAVE_KERBEROS = False
@@ -293,14 +306,17 @@ class Connection(ConnectionBase):
os.environ["KRB5CCNAME"] = krb5ccname
krb5env = dict(KRB5CCNAME=krb5ccname)
- # stores various flags to call with kinit, we currently only use this
- # to set -f so we can get a forward-able ticket (cred delegation)
- kinit_flags = []
- if boolean(self.get_option('_extras').get('ansible_winrm_kerberos_delegation', False)):
- kinit_flags.append('-f')
-
+ # Stores various flags to call with kinit, these could be explicit args set by 'ansible_winrm_kinit_args' OR
+ # '-f' if kerberos delegation is requested (ansible_winrm_kerberos_delegation).
kinit_cmdline = [self._kinit_cmd]
- kinit_cmdline.extend(kinit_flags)
+ kinit_args = self.get_option('kinit_args')
+ if kinit_args:
+ kinit_args = [to_text(a) for a in shlex.split(kinit_args) if a.strip()]
+ kinit_cmdline.extend(kinit_args)
+
+ elif boolean(self.get_option('_extras').get('ansible_winrm_kerberos_delegation', False)):
+ kinit_cmdline.append('-f')
+
kinit_cmdline.append(principal)
# pexpect runs the process in its own pty so it can correctly send
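The kinit change gives ansible_winrm_kinit_args precedence over the implicit -f that Kerberos delegation used to add. The precedence, reduced to plain values (function and argument names here are illustrative, not the plugin API):

    import shlex

    def build_kinit_cmdline(principal, kinit_cmd='kinit', kinit_args=None, delegation=False):
        # an explicit kinit_args string wins; otherwise add -f only when delegation is requested
        cmdline = [kinit_cmd]
        if kinit_args:
            cmdline.extend(a for a in shlex.split(kinit_args) if a.strip())
        elif delegation:
            cmdline.append('-f')
        cmdline.append(principal)
        return cmdline

    assert build_kinit_cmdline('user@REALM', delegation=True) == ['kinit', '-f', 'user@REALM']
    assert build_kinit_cmdline('user@REALM', kinit_args='-f -p') == ['kinit', '-f', '-p', 'user@REALM']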
diff --git a/lib/ansible/plugins/doc_fragments/action_common_attributes.py b/lib/ansible/plugins/doc_fragments/action_common_attributes.py
new file mode 100644
index 00000000..ea8fa7c9
--- /dev/null
+++ b/lib/ansible/plugins/doc_fragments/action_common_attributes.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+# NOTE: this file is here to allow modules using the new attributes feature to
+# work w/o errors in this version of ansible, it does NOT provide the full
+# attributes feature, just a shim to avoid the fragment not being found.
+
+class ModuleDocFragment(object):
+
+ # Standard documentation fragment
+ DOCUMENTATION = r'''
+options: {}
+'''
diff --git a/lib/ansible/plugins/doc_fragments/connection_pipelining.py b/lib/ansible/plugins/doc_fragments/connection_pipelining.py
new file mode 100644
index 00000000..c1031447
--- /dev/null
+++ b/lib/ansible/plugins/doc_fragments/connection_pipelining.py
@@ -0,0 +1,29 @@
+# Copyright (c) 2021 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # common connection pipelining documentation fragment
+ DOCUMENTATION = """
+options:
+ pipelining:
+ default: ANSIBLE_PIPELINING
+ description:
+ - Pipelining reduces the number of connection operations required to execute a module on the remote server,
+ by executing many Ansible modules without actual file transfers.
+ - This can result in a very significant performance improvement when enabled.
+ - However this can conflict with privilege escalation (become).
+ For example, when using sudo operations you must first disable 'requiretty' in the sudoers file for the target hosts,
+ which is why this feature is disabled by default.
+ env:
+ - name: ANSIBLE_PIPELINING
+ ini:
+ - section: defaults
+ key: pipelining
+ type: boolean
+ vars:
+ - name: ansible_pipelining
+"""
diff --git a/lib/ansible/plugins/doc_fragments/constructed.py b/lib/ansible/plugins/doc_fragments/constructed.py
index f2788da0..1e73de70 100644
--- a/lib/ansible/plugins/doc_fragments/constructed.py
+++ b/lib/ansible/plugins/doc_fragments/constructed.py
@@ -29,4 +29,25 @@ options:
description: Add hosts to group based on the values of a variable.
type: list
default: []
+ use_extra_vars:
+ version_added: '2.11'
+ description: Merge extra vars into the available variables for composition (highest precedence).
+ type: bool
+ default: False
+ ini:
+ - section: inventory_plugins
+ key: use_extra_vars
+ env:
+ - name: ANSIBLE_INVENTORY_USE_EXTRA_VARS
+ leading_separator:
+ description:
+ - Use in conjunction with keyed_groups.
+ - By default, a keyed group that does not have a prefix or a separator provided will have a name that starts with an underscore.
+ - This is because the default prefix is "" and the default separator is "_".
+ - Set this option to False to omit the leading underscore (or other separator) if no prefix is given.
+ - If the group name is derived from a mapping the separator is still used to concatenate the items.
+ - To not use a separator in the group name at all, set the separator for the keyed group to an empty string instead.
+ type: boolean
+ default: True
+ version_added: '2.11'
'''
diff --git a/lib/ansible/plugins/doc_fragments/default_callback.py b/lib/ansible/plugins/doc_fragments/default_callback.py
index df3966b9..fd17b260 100644
--- a/lib/ansible/plugins/doc_fragments/default_callback.py
+++ b/lib/ansible/plugins/doc_fragments/default_callback.py
@@ -82,4 +82,17 @@ class ModuleDocFragment(object):
ini:
- key: check_mode_markers
section: defaults
+ show_task_path_on_failure:
+ name: Show file path on failed tasks
+ description:
+ When a task fails, display the path to the file containing the failed task and the line number.
+ This information is displayed automatically for every task when running with C(-vv) or greater verbosity.
+ type: bool
+ default: no
+ env:
+ - name: ANSIBLE_SHOW_TASK_PATH_ON_FAILURE
+ ini:
+ - key: show_task_path_on_failure
+ section: defaults
+ version_added: '2.11'
'''
diff --git a/lib/ansible/plugins/doc_fragments/files.py b/lib/ansible/plugins/doc_fragments/files.py
index 5d6092a6..008642d6 100644
--- a/lib/ansible/plugins/doc_fragments/files.py
+++ b/lib/ansible/plugins/doc_fragments/files.py
@@ -25,6 +25,11 @@ options:
number which will have unexpected results.
- As of Ansible 1.8, the mode may be specified as a symbolic mode (for example, C(u+rwx) or
C(u=rw,g=r,o=r)).
+ - If C(mode) is not specified and the destination file B(does not) exist, the default C(umask) on the system will be used
+ when setting the mode for the newly created file.
+ - If C(mode) is not specified and the destination file B(does) exist, the mode of the existing file will be used.
+ - Specifying C(mode) is the best way to ensure files are created with the correct permissions.
+ See CVE-2020-1736 for further details.
type: raw
owner:
description:
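The added wording spells out the CVE-2020-1736 behaviour: without an explicit mode, a newly created file falls back to the process umask. A quick, illustrative way to see what that default works out to on a given system:

    import os

    current_umask = os.umask(0)   # os.umask returns the previous value...
    os.umask(current_umask)       # ...so restore it immediately
    default_mode = 0o666 & ~current_umask
    print('files created without an explicit mode get %#o' % default_mode)  # e.g. 0o644 for umask 022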
diff --git a/lib/ansible/plugins/doc_fragments/shell_common.py b/lib/ansible/plugins/doc_fragments/shell_common.py
index 7ba4049c..5b018000 100644
--- a/lib/ansible/plugins/doc_fragments/shell_common.py
+++ b/lib/ansible/plugins/doc_fragments/shell_common.py
@@ -19,11 +19,26 @@ options:
key: remote_tmp
vars:
- name: ansible_remote_tmp
+ common_remote_group:
+ name: Enables changing the group ownership of temporary files and directories
+ default: null
+ description:
+ - Checked when Ansible needs to execute a module as a different user.
+ - If setfacl and chown both fail and do not let the different user access the module's files, they will be chgrp'd to this group.
+ - In order for this to work, the remote_user and become_user must share a common group and this setting must be set to that group.
+ env: [{name: ANSIBLE_COMMON_REMOTE_GROUP}]
+ vars:
+ - name: ansible_common_remote_group
+ ini:
+ - {key: common_remote_group, section: defaults}
+ version_added: "2.10"
system_tmpdirs:
description:
- - "List of valid system temporary directories for Ansible to choose when it cannot use
- ``remote_tmp``, normally due to permission issues. These must be world readable, writable,
- and executable."
+ - "List of valid system temporary directories on the managed machine for Ansible to choose
+ when it cannot use ``remote_tmp``, normally due to permission issues. These must be world
+ readable, writable, and executable. This list should only contain directories which the
+ system administrator has pre-created with the proper ownership and permissions otherwise
+ security issues can arise."
default: [ /var/tmp, /tmp ]
type: list
env: [{name: ANSIBLE_SYSTEM_TMPDIRS}]
@@ -43,10 +58,10 @@ options:
vars:
- name: ansible_async_dir
environment:
- type: dict
- default: {}
+ type: list
+ default: [{}]
description:
- - dictionary of environment variables and their values to use when executing commands.
+ - List of dictionaries of environment variables and their values to use when executing commands.
admin_users:
type: list
default: ['root', 'toor']
diff --git a/lib/ansible/plugins/doc_fragments/shell_windows.py b/lib/ansible/plugins/doc_fragments/shell_windows.py
index d6d4d7c5..ddb573fc 100644
--- a/lib/ansible/plugins/doc_fragments/shell_windows.py
+++ b/lib/ansible/plugins/doc_fragments/shell_windows.py
@@ -38,12 +38,11 @@ options:
- Windows only supports C(no) as an option.
type: bool
default: 'no'
- choices:
- - 'no'
+ choices: ['no', False]
environment:
description:
- - Dictionary of environment variables and their values to use when
+ - List of dictionaries of environment variables and their values to use when
executing commands.
- type: dict
- default: {}
+ type: list
+ default: [{}]
"""
diff --git a/lib/ansible/plugins/doc_fragments/url.py b/lib/ansible/plugins/doc_fragments/url.py
index ddb8e4d1..724c28ef 100644
--- a/lib/ansible/plugins/doc_fragments/url.py
+++ b/lib/ansible/plugins/doc_fragments/url.py
@@ -63,4 +63,15 @@ options:
- PEM formatted file that contains your private key to be used for SSL client authentication.
- If C(client_cert) contains both the certificate and key, this option is not required.
type: path
+ use_gssapi:
+ description:
+ - Use GSSAPI to perform the authentication, typically this is for Kerberos or Kerberos through Negotiate
+ authentication.
+ - Requires the Python library L(gssapi,https://github.com/pythongssapi/python-gssapi) to be installed.
+ - Credentials for GSSAPI can be specified with I(url_username)/I(url_password) or with the GSSAPI env var
+ C(KRB5CCNAME) that specifies a custom Kerberos credential cache.
+ - NTLM authentication is C(not) supported even if the GSSAPI mech for NTLM has been installed.
+ type: bool
+ default: no
+ version_added: '2.11'
'''
diff --git a/lib/ansible/plugins/filter/core.py b/lib/ansible/plugins/filter/core.py
index 99e9e0e3..06570c27 100644
--- a/lib/ansible/plugins/filter/core.py
+++ b/lib/ansible/plugins/filter/core.py
@@ -20,15 +20,12 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
-import crypt
import glob
import hashlib
-import itertools
import json
import ntpath
import os.path
import re
-import string
import sys
import time
import uuid
@@ -41,8 +38,8 @@ from random import Random, SystemRandom, shuffle
from jinja2.filters import environmentfilter, do_groupby as _do_groupby
from ansible.errors import AnsibleError, AnsibleFilterError, AnsibleFilterTypeError
-from ansible.module_utils.six import iteritems, string_types, integer_types, reraise
-from ansible.module_utils.six.moves import reduce, shlex_quote
+from ansible.module_utils.six import string_types, integer_types, reraise, text_type
+from ansible.module_utils.six.moves import shlex_quote
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.module_utils.common.collections import is_sequence
from ansible.module_utils.common._collections_compat import Mapping
@@ -110,6 +107,8 @@ def strftime(string_format, second=None):
def quote(a):
''' return its argument quoted for shell usage '''
+ if a is None:
+ a = u''
return shlex_quote(to_text(a))
@@ -253,11 +252,11 @@ def randomize_list(mylist, seed=None):
def get_hash(data, hashtype='sha1'):
-
- try: # see if hash is supported
+ try:
h = hashlib.new(hashtype)
- except Exception:
- return None
+ except Exception as e:
+ # hash is not supported?
+ raise AnsibleFilterError(e)
h.update(to_bytes(data, errors='surrogate_or_strict'))
return h.hexdigest()
@@ -467,19 +466,19 @@ def b64decode(string, encoding='utf-8'):
return to_text(base64.b64decode(to_bytes(string, errors='surrogate_or_strict')), encoding=encoding)
-def flatten(mylist, levels=None):
+def flatten(mylist, levels=None, skip_nulls=True):
ret = []
for element in mylist:
- if element in (None, 'None', 'null'):
- # ignore undefined items
- break
+ if skip_nulls and element in (None, 'None', 'null'):
+ # ignore null items
+ continue
elif is_sequence(element):
if levels is None:
- ret.extend(flatten(element))
+ ret.extend(flatten(element, skip_nulls=skip_nulls))
elif levels >= 1:
# decrement as we go down the stack
- ret.extend(flatten(element, levels=(int(levels) - 1)))
+ ret.extend(flatten(element, levels=(int(levels) - 1), skip_nulls=skip_nulls))
else:
ret.append(element)
else:
@@ -660,4 +659,5 @@ class FilterModule(object):
'dict2items': dict_to_list_of_dict_key_value_elements,
'items2dict': list_of_dict_key_value_elements_to_dict,
'subelements': subelements,
+ 'split': partial(unicode_wrap, text_type.split),
}
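The flatten fix above replaces the old break, which silently truncated the list at the first None, with a continue, and exposes the behaviour as skip_nulls. A standalone sketch of the corrected recursion (names mirror the filter, but this is not the shipped implementation):

    def flatten(seq, levels=None, skip_nulls=True):
        out = []
        for item in seq:
            if skip_nulls and item in (None, 'None', 'null'):
                continue                      # drop nulls instead of truncating the rest
            if isinstance(item, (list, tuple)):
                if levels is None:
                    out.extend(flatten(item, skip_nulls=skip_nulls))
                elif levels >= 1:
                    out.extend(flatten(item, levels=levels - 1, skip_nulls=skip_nulls))
                else:
                    out.append(item)
            else:
                out.append(item)
        return out

    assert flatten([1, None, [2, [3]]]) == [1, 2, 3]
    assert flatten([1, None, 2], skip_nulls=False) == [1, None, 2]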
diff --git a/lib/ansible/plugins/filter/mathstuff.py b/lib/ansible/plugins/filter/mathstuff.py
index 64d0ba8b..77baa7ef 100644
--- a/lib/ansible/plugins/filter/mathstuff.py
+++ b/lib/ansible/plugins/filter/mathstuff.py
@@ -42,6 +42,12 @@ try:
except ImportError:
HAS_UNIQUE = False
+try:
+ from jinja2.filters import do_max, do_min
+ HAS_MIN_MAX = True
+except ImportError:
+ HAS_MIN_MAX = False
+
display = Display()
@@ -56,11 +62,7 @@ def unique(environment, a, case_sensitive=False, attribute=None):
error = e = None
try:
if HAS_UNIQUE:
- c = do_unique(environment, a, case_sensitive=case_sensitive, attribute=attribute)
- if isinstance(a, Hashable):
- c = set(c)
- else:
- c = list(c)
+ c = list(do_unique(environment, a, case_sensitive=case_sensitive, attribute=attribute))
except TypeError as e:
error = e
_do_fail(e)
@@ -76,13 +78,11 @@ def unique(environment, a, case_sensitive=False, attribute=None):
raise AnsibleFilterError("Ansible's unique filter does not support case_sensitive nor attribute parameters, "
"you need a newer version of Jinja2 that provides their version of the filter.")
- if isinstance(a, Hashable):
- c = set(a)
- else:
- c = []
- for x in a:
- if x not in c:
- c.append(x)
+ c = []
+ for x in a:
+ if x not in c:
+ c.append(x)
+
return c
@@ -123,14 +123,28 @@ def union(environment, a, b):
return c
-def min(a):
- _min = __builtins__.get('min')
- return _min(a)
+@environmentfilter
+def min(environment, a, **kwargs):
+ if HAS_MIN_MAX:
+ return do_min(environment, a, **kwargs)
+ else:
+ if kwargs:
+ raise AnsibleFilterError("Ansible's min filter does not support any keyword arguments. "
+ "You need Jinja2 2.10 or later that provides their version of the filter.")
+ _min = __builtins__.get('min')
+ return _min(a)
-def max(a):
- _max = __builtins__.get('max')
- return _max(a)
+@environmentfilter
+def max(environment, a, **kwargs):
+ if HAS_MIN_MAX:
+ return do_max(environment, a, **kwargs)
+ else:
+ if kwargs:
+ raise AnsibleFilterError("Ansible's max filter does not support any keyword arguments. "
+ "You need Jinja2 2.10 or later that provides their version of the filter.")
+ _max = __builtins__.get('max')
+ return _max(a)
def logarithm(x, base=math.e):
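The new min/max filters defer to Jinja2's do_min/do_max (which understand attribute= and case_sensitive=) and only fall back to the Python builtins on older Jinja2 releases. The feature-detection pattern, stripped to its essentials (assuming only that do_min ships with Jinja2 >= 2.10):

    try:
        from jinja2.filters import do_min      # Jinja2 >= 2.10
        HAS_DO_MIN = True
    except ImportError:
        HAS_DO_MIN = False

    def filter_min(environment, seq, **kwargs):
        if HAS_DO_MIN:
            return do_min(environment, seq, **kwargs)
        if kwargs:
            raise ValueError('keyword arguments need Jinja2 >= 2.10')
        return min(seq)   # plain builtin as the last resort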
diff --git a/lib/ansible/plugins/inventory/__init__.py b/lib/ansible/plugins/inventory/__init__.py
index d34c1e0a..4ce924f5 100644
--- a/lib/ansible/plugins/inventory/__init__.py
+++ b/lib/ansible/plugins/inventory/__init__.py
@@ -34,7 +34,7 @@ from ansible.module_utils.parsing.convert_bool import boolean
from ansible.module_utils.six import string_types
from ansible.template import Templar
from ansible.utils.display import Display
-from ansible.utils.vars import combine_vars
+from ansible.utils.vars import combine_vars, load_extra_vars
display = Display()
@@ -150,6 +150,11 @@ class BaseInventoryPlugin(AnsiblePlugin):
""" Parses an Inventory Source"""
TYPE = 'generator'
+
+ # 3rd party plugins redefine this to
+ # use custom group name sanitization
+ # since constructed features enforce
+ # it by default.
_sanitize_group_name = staticmethod(to_safe_group_name)
def __init__(self):
@@ -159,6 +164,7 @@ class BaseInventoryPlugin(AnsiblePlugin):
self._options = {}
self.inventory = None
self.display = display
+ self._vars = {}
def parse(self, inventory, loader, path, cache=True):
''' Populates inventory from the given data. Raises an error on any parse failure
@@ -177,6 +183,7 @@ class BaseInventoryPlugin(AnsiblePlugin):
self.loader = loader
self.inventory = inventory
self.templar = Templar(loader=loader)
+ self._vars = load_extra_vars(loader)
def verify_file(self, path):
''' Verify if file is usable by this plugin, base does minimal accessibility check
@@ -229,10 +236,10 @@ class BaseInventoryPlugin(AnsiblePlugin):
# configs are dictionaries
raise AnsibleParserError('inventory source has invalid structure, it should be a dictionary, got: %s' % type(config))
- self.set_options(direct=config)
+ self.set_options(direct=config, var_options=self._vars)
if 'cache' in self._options and self.get_option('cache'):
cache_option_keys = [('_uri', 'cache_connection'), ('_timeout', 'cache_timeout'), ('_prefix', 'cache_prefix')]
- cache_options = dict((opt[0], self.get_option(opt[1])) for opt in cache_option_keys if self.get_option(opt[1]))
+ cache_options = dict((opt[0], self.get_option(opt[1])) for opt in cache_option_keys if self.get_option(opt[1]) is not None)
self._cache = get_cache_plugin(self.get_option('cache_plugin'), **cache_options)
return config
@@ -320,7 +327,7 @@ class Cacheable(object):
def load_cache_plugin(self):
plugin_name = self.get_option('cache_plugin')
cache_option_keys = [('_uri', 'cache_connection'), ('_timeout', 'cache_timeout'), ('_prefix', 'cache_prefix')]
- cache_options = dict((opt[0], self.get_option(opt[1])) for opt in cache_option_keys if self.get_option(opt[1]))
+ cache_options = dict((opt[0], self.get_option(opt[1])) for opt in cache_option_keys if self.get_option(opt[1]) is not None)
self._cache = get_cache_plugin(plugin_name, **cache_options)
def get_cache_key(self, path):
@@ -354,7 +361,17 @@ class Constructable(object):
def _compose(self, template, variables):
''' helper method for plugins to compose variables for Ansible based on jinja2 expression and inventory vars'''
t = self.templar
- t.available_variables = variables
+
+ try:
+ use_extra = self.get_option('use_extra_vars')
+ except Exception:
+ use_extra = False
+
+ if use_extra:
+ t.available_variables = combine_vars(variables, self._vars)
+ else:
+ t.available_variables = variables
+
return t.template('%s%s%s' % (t.environment.variable_start_string, template, t.environment.variable_end_string), disable_lookups=True)
def _set_composite_vars(self, compose, variables, host, strict=False):
@@ -369,15 +386,16 @@ class Constructable(object):
continue
self.inventory.set_variable(host, varname, composite)
- def _add_host_to_composed_groups(self, groups, variables, host, strict=False):
+ def _add_host_to_composed_groups(self, groups, variables, host, strict=False, fetch_hostvars=True):
''' helper to create complex groups for plugins based on jinja2 conditionals, hosts that meet the conditional are added to group'''
# process each 'group entry'
if groups and isinstance(groups, dict):
- variables = combine_vars(variables, self.inventory.get_host(host).get_vars())
+ if fetch_hostvars:
+ variables = combine_vars(variables, self.inventory.get_host(host).get_vars())
self.templar.available_variables = variables
for group_name in groups:
conditional = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % groups[group_name]
- group_name = original_safe(group_name, force=True)
+ group_name = self._sanitize_group_name(group_name)
try:
result = boolean(self.templar.template(conditional))
except Exception as e:
@@ -391,13 +409,14 @@ class Constructable(object):
# add host to group
self.inventory.add_child(group_name, host)
- def _add_host_to_keyed_groups(self, keys, variables, host, strict=False):
+ def _add_host_to_keyed_groups(self, keys, variables, host, strict=False, fetch_hostvars=True):
''' helper to create groups for plugins based on variable values and add the corresponding hosts to it'''
if keys and isinstance(keys, list):
for keyed in keys:
if keyed and isinstance(keyed, dict):
- variables = combine_vars(variables, self.inventory.get_host(host).get_vars())
+ if fetch_hostvars:
+ variables = combine_vars(variables, self.inventory.get_host(host).get_vars())
try:
key = self._compose(keyed.get('key'), variables)
except Exception as e:
@@ -431,6 +450,8 @@ class Constructable(object):
raise AnsibleParserError("Invalid group name format, expected a string or a list of them or dictionary, got: %s" % type(key))
for bare_name in new_raw_group_names:
+ if prefix == '' and self.get_option('leading_separator') is False:
+ sep = ''
gname = self._sanitize_group_name('%s%s%s' % (prefix, sep, bare_name))
result_gname = self.inventory.add_group(gname)
self.inventory.add_host(host, result_gname)
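The last hunk is where leading_separator takes effect: with no prefix and the option set to False, the separator is dropped so keyed group names no longer start with '_'. The naming rule in isolation (to_safe_group_name is the real sanitizer; a no-op stands in for it here):

    def keyed_group_name(bare_name, prefix='', sep='_', leading_separator=True,
                         sanitize=lambda name: name):
        if prefix == '' and leading_separator is False:
            sep = ''
        return sanitize('%s%s%s' % (prefix, sep, bare_name))

    assert keyed_group_name('ubuntu') == '_ubuntu'                         # historical default
    assert keyed_group_name('ubuntu', leading_separator=False) == 'ubuntu'
    assert keyed_group_name('ubuntu', prefix='os') == 'os_ubuntu'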
diff --git a/lib/ansible/plugins/inventory/advanced_host_list.py b/lib/ansible/plugins/inventory/advanced_host_list.py
index dae02427..c61a4656 100644
--- a/lib/ansible/plugins/inventory/advanced_host_list.py
+++ b/lib/ansible/plugins/inventory/advanced_host_list.py
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
- inventory: advanced_host_list
+ name: advanced_host_list
version_added: "2.4"
short_description: Parses a 'host list' with ranges
description:
diff --git a/lib/ansible/plugins/inventory/auto.py b/lib/ansible/plugins/inventory/auto.py
index bbb52bae..82d96650 100644
--- a/lib/ansible/plugins/inventory/auto.py
+++ b/lib/ansible/plugins/inventory/auto.py
@@ -6,7 +6,6 @@ __metaclass__ = type
DOCUMENTATION = '''
name: auto
- plugin_type: inventory
author:
- Matt Davis (@nitzmahone)
version_added: "2.5"
diff --git a/lib/ansible/plugins/inventory/constructed.py b/lib/ansible/plugins/inventory/constructed.py
index 1e9c375b..b94dad4f 100644
--- a/lib/ansible/plugins/inventory/constructed.py
+++ b/lib/ansible/plugins/inventory/constructed.py
@@ -6,7 +6,6 @@ __metaclass__ = type
DOCUMENTATION = '''
name: constructed
- plugin_type: inventory
version_added: "2.4"
short_description: Uses Jinja2 to construct vars and groups based on existing inventory.
description:
@@ -20,6 +19,17 @@ DOCUMENTATION = '''
description: token that ensures this is a source file for the 'constructed' plugin.
required: True
choices: ['constructed']
+ use_vars_plugins:
+ description:
+ - Normally, for performance reasons, vars plugins get executed after the inventory sources complete the base inventory,
+ this option allows for getting vars related to hosts/groups from those plugins.
+ - The host_group_vars (enabled by default) 'vars plugin' is the one responsible for reading host_vars/ and group_vars/ directories.
+ - This will execute all vars plugins, even those that are not supposed to execute at the 'inventory' stage.
+ See vars plugins docs for details on 'stage'.
+ required: false
+ default: false
+ type: boolean
+ version_added: '2.11'
extends_documentation_fragment:
- constructed
'''
@@ -71,12 +81,13 @@ EXAMPLES = r'''
import os
from ansible import constants as C
-from ansible.errors import AnsibleParserError
+from ansible.errors import AnsibleParserError, AnsibleOptionsError
from ansible.inventory.helpers import get_group_vars
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
from ansible.module_utils._text import to_native
from ansible.utils.vars import combine_vars
from ansible.vars.fact_cache import FactCache
+from ansible.vars.plugins import get_vars_from_inventory_sources
class InventoryModule(BaseInventoryPlugin, Constructable):
@@ -101,6 +112,28 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
return valid
+ def get_all_host_vars(self, host, loader, sources):
+ ''' requires host object '''
+ return combine_vars(self.host_groupvars(host, loader, sources), self.host_vars(host, loader, sources))
+
+ def host_groupvars(self, host, loader, sources):
+ ''' requires host object '''
+ gvars = get_group_vars(host.get_groups())
+
+ if self.get_option('use_vars_plugins'):
+ gvars = combine_vars(gvars, get_vars_from_inventory_sources(loader, sources, host.get_groups(), 'all'))
+
+ return gvars
+
+ def host_vars(self, host, loader, sources):
+ ''' requires host object '''
+ hvars = host.get_vars()
+
+ if self.get_option('use_vars_plugins'):
+ hvars = combine_vars(hvars, get_vars_from_inventory_sources(loader, sources, [host], 'all'))
+
+ return hvars
+
def parse(self, inventory, loader, path, cache=False):
''' parses the inventory file '''
@@ -108,6 +141,13 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
self._read_config_data(path)
+ sources = []
+ try:
+ sources = inventory.processed_sources
+ except AttributeError:
+ if self.get_option('use_vars_plugins'):
+ raise AnsibleOptionsError("The option use_vars_plugins requires ansible >= 2.11.")
+
strict = self.get_option('strict')
fact_cache = FactCache()
try:
@@ -115,7 +155,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
for host in inventory.hosts:
# get available variables to templar
- hostvars = combine_vars(get_group_vars(inventory.hosts[host].get_groups()), inventory.hosts[host].get_vars())
+ hostvars = self.get_all_host_vars(inventory.hosts[host], loader, sources)
if host in fact_cache: # adds facts if cache is active
hostvars = combine_vars(hostvars, fact_cache[host])
@@ -123,15 +163,15 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
self._set_composite_vars(self.get_option('compose'), hostvars, host, strict=strict)
# refetch host vars in case new ones have been created above
- hostvars = combine_vars(get_group_vars(inventory.hosts[host].get_groups()), inventory.hosts[host].get_vars())
+ hostvars = self.get_all_host_vars(inventory.hosts[host], loader, sources)
if host in self._cache: # adds facts if cache is active
hostvars = combine_vars(hostvars, self._cache[host])
# constructed groups based on conditionals
- self._add_host_to_composed_groups(self.get_option('groups'), hostvars, host, strict=strict)
+ self._add_host_to_composed_groups(self.get_option('groups'), hostvars, host, strict=strict, fetch_hostvars=False)
# constructed groups based variable values
- self._add_host_to_keyed_groups(self.get_option('keyed_groups'), hostvars, host, strict=strict)
+ self._add_host_to_keyed_groups(self.get_option('keyed_groups'), hostvars, host, strict=strict, fetch_hostvars=False)
except Exception as e:
- raise AnsibleParserError("failed to parse %s: %s " % (to_native(path), to_native(e)))
+ raise AnsibleParserError("failed to parse %s: %s " % (to_native(path), to_native(e)), orig_exc=e)
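With use_vars_plugins enabled, vars-plugin data (host_vars/, group_vars/) is layered on top of the inline inventory vars before composing, and host-level vars still override group-level ones. The merge order in simplified form (combine_vars here is a plain dict update standing in for Ansible's default hash behaviour):

    def combine_vars(a, b):
        merged = dict(a)
        merged.update(b)          # the later source wins
        return merged

    def all_host_vars(group_vars, host_vars, plugin_group_vars=None, plugin_host_vars=None):
        gvars = combine_vars(group_vars, plugin_group_vars or {})
        hvars = combine_vars(host_vars, plugin_host_vars or {})
        return combine_vars(gvars, hvars)     # host vars override group vars

    assert all_host_vars({'x': 1, 'y': 1}, {'x': 2}) == {'x': 2, 'y': 1}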
diff --git a/lib/ansible/plugins/inventory/generator.py b/lib/ansible/plugins/inventory/generator.py
index 4a7d3b7a..27575dd0 100644
--- a/lib/ansible/plugins/inventory/generator.py
+++ b/lib/ansible/plugins/inventory/generator.py
@@ -6,7 +6,6 @@ __metaclass__ = type
DOCUMENTATION = '''
name: generator
- plugin_type: inventory
version_added: "2.6"
short_description: Uses Jinja2 to construct hosts and groups from patterns
description:
diff --git a/lib/ansible/plugins/inventory/host_list.py b/lib/ansible/plugins/inventory/host_list.py
index 4a7a98d8..eee85165 100644
--- a/lib/ansible/plugins/inventory/host_list.py
+++ b/lib/ansible/plugins/inventory/host_list.py
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r'''
- inventory: host_list
+ name: host_list
version_added: "2.4"
short_description: Parses a 'host list' string
description:
diff --git a/lib/ansible/plugins/inventory/ini.py b/lib/ansible/plugins/inventory/ini.py
index 2175f421..be9ec2fc 100644
--- a/lib/ansible/plugins/inventory/ini.py
+++ b/lib/ansible/plugins/inventory/ini.py
@@ -4,7 +4,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
- inventory: ini
+ name: ini
version_added: "2.4"
short_description: Uses an Ansible INI file as inventory source.
description:
@@ -29,49 +29,47 @@ DOCUMENTATION = '''
The YAML inventory plugin processes variable values consistently and correctly.
'''
-EXAMPLES = '''
- example1: |
- # example cfg file
- [web]
- host1
- host2 ansible_port=222 # defined inline, interpreted as an integer
+EXAMPLES = '''# fmt: ini
+# Example 1
+[web]
+host1
+host2 ansible_port=222 # defined inline, interpreted as an integer
- [web:vars]
- http_port=8080 # all members of 'web' will inherit these
- myvar=23 # defined in a :vars section, interpreted as a string
+[web:vars]
+http_port=8080 # all members of 'web' will inherit these
+myvar=23 # defined in a :vars section, interpreted as a string
- [web:children] # child groups will automatically add their hosts to parent group
- apache
- nginx
+[web:children] # child groups will automatically add their hosts to parent group
+apache
+nginx
- [apache]
- tomcat1
- tomcat2 myvar=34 # host specific vars override group vars
- tomcat3 mysecret="'03#pa33w0rd'" # proper quoting to prevent value changes
+[apache]
+tomcat1
+tomcat2 myvar=34 # host specific vars override group vars
+tomcat3 mysecret="'03#pa33w0rd'" # proper quoting to prevent value changes
- [nginx]
- jenkins1
+[nginx]
+jenkins1
- [nginx:vars]
- has_java = True # vars in child groups override same in parent
+[nginx:vars]
+has_java = True # vars in child groups override same in parent
- [all:vars]
- has_java = False # 'all' is 'top' parent
+[all:vars]
+has_java = False # 'all' is 'top' parent
- example2: |
- # other example config
- host1 # this is 'ungrouped'
+# Example 2
+host1 # this is 'ungrouped'
- # both hosts have same IP but diff ports, also 'ungrouped'
- host2 ansible_host=127.0.0.1 ansible_port=44
- host3 ansible_host=127.0.0.1 ansible_port=45
+# both hosts have same IP but diff ports, also 'ungrouped'
+host2 ansible_host=127.0.0.1 ansible_port=44
+host3 ansible_host=127.0.0.1 ansible_port=45
- [g1]
- host4
+[g1]
+host4
- [g2]
- host4 # same host as above, but member of 2 groups, will inherit vars from both
- # inventory hostnames are unique
+[g2]
+host4 # same host as above, but member of 2 groups, will inherit vars from both
+ # inventory hostnames are unique
'''
import ast
diff --git a/lib/ansible/plugins/inventory/script.py b/lib/ansible/plugins/inventory/script.py
index b4094a56..d94f35d8 100644
--- a/lib/ansible/plugins/inventory/script.py
+++ b/lib/ansible/plugins/inventory/script.py
@@ -6,7 +6,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
- inventory: script
+ name: script
version_added: "2.4"
short_description: Executes an inventory script that returns JSON
options:
diff --git a/lib/ansible/plugins/inventory/toml.py b/lib/ansible/plugins/inventory/toml.py
index 26ad600a..50e1ced5 100644
--- a/lib/ansible/plugins/inventory/toml.py
+++ b/lib/ansible/plugins/inventory/toml.py
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r'''
- inventory: toml
+ name: toml
version_added: "2.8"
short_description: Uses a specific TOML file as an inventory source.
description:
@@ -15,76 +15,75 @@ DOCUMENTATION = r'''
- Requires the 'toml' python library
'''
-EXAMPLES = r'''
-# Following are examples of 3 different inventories in TOML format
-example1: |
- [all.vars]
- has_java = false
+EXAMPLES = r'''# fmt: toml
+# Example 1
+[all.vars]
+has_java = false
- [web]
- children = [
- "apache",
- "nginx"
- ]
- vars = { http_port = 8080, myvar = 23 }
+[web]
+children = [
+ "apache",
+ "nginx"
+]
+vars = { http_port = 8080, myvar = 23 }
- [web.hosts]
- host1 = {}
- host2 = { ansible_port = 222 }
+[web.hosts]
+host1 = {}
+host2 = { ansible_port = 222 }
- [apache.hosts]
- tomcat1 = {}
- tomcat2 = { myvar = 34 }
- tomcat3 = { mysecret = "03#pa33w0rd" }
+[apache.hosts]
+tomcat1 = {}
+tomcat2 = { myvar = 34 }
+tomcat3 = { mysecret = "03#pa33w0rd" }
- [nginx.hosts]
- jenkins1 = {}
+[nginx.hosts]
+jenkins1 = {}
- [nginx.vars]
- has_java = true
+[nginx.vars]
+has_java = true
-example2: |
- [all.vars]
- has_java = false
+# Example 2
+[all.vars]
+has_java = false
- [web]
- children = [
- "apache",
- "nginx"
- ]
+[web]
+children = [
+ "apache",
+ "nginx"
+]
- [web.vars]
- http_port = 8080
- myvar = 23
+[web.vars]
+http_port = 8080
+myvar = 23
- [web.hosts.host1]
- [web.hosts.host2]
- ansible_port = 222
+[web.hosts.host1]
+[web.hosts.host2]
+ansible_port = 222
- [apache.hosts.tomcat1]
+[apache.hosts.tomcat1]
- [apache.hosts.tomcat2]
- myvar = 34
+[apache.hosts.tomcat2]
+myvar = 34
- [apache.hosts.tomcat3]
- mysecret = "03#pa33w0rd"
+[apache.hosts.tomcat3]
+mysecret = "03#pa33w0rd"
- [nginx.hosts.jenkins1]
+[nginx.hosts.jenkins1]
- [nginx.vars]
- has_java = true
+[nginx.vars]
+has_java = true
-example3: |
- [ungrouped.hosts]
- host1 = {}
- host2 = { ansible_host = "127.0.0.1", ansible_port = 44 }
- host3 = { ansible_host = "127.0.0.1", ansible_port = 45 }
+# Example 3
+[ungrouped.hosts]
+host1 = {}
+host2 = { ansible_host = "127.0.0.1", ansible_port = 44 }
+host3 = { ansible_host = "127.0.0.1", ansible_port = 45 }
- [g1.hosts]
- host4 = {}
+[g1.hosts]
+host4 = {}
- [g2.hosts]
- host4 = {}
+[g2.hosts]
+host4 = {}
'''
import os
@@ -108,11 +107,6 @@ except ImportError:
display = Display()
-WARNING_MSG = (
- 'The TOML inventory format is marked as preview, which means that it is not guaranteed to have a backwards '
- 'compatible interface.'
-)
-
if HAS_TOML and hasattr(toml, 'TomlEncoder'):
class AnsibleTomlEncoder(toml.TomlEncoder):
@@ -236,8 +230,6 @@ class InventoryModule(BaseFileInventoryPlugin):
'The TOML inventory plugin requires the python "toml" library'
)
- display.warning(WARNING_MSG)
-
super(InventoryModule, self).parse(inventory, loader, path)
self.set_options()
diff --git a/lib/ansible/plugins/inventory/yaml.py b/lib/ansible/plugins/inventory/yaml.py
index dc882c6d..f927e00b 100644
--- a/lib/ansible/plugins/inventory/yaml.py
+++ b/lib/ansible/plugins/inventory/yaml.py
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
- inventory: yaml
+ name: yaml
version_added: "2.4"
short_description: Uses a specific YAML file as an inventory source.
description:
diff --git a/lib/ansible/plugins/loader.py b/lib/ansible/plugins/loader.py
index 3c241274..33dc10c2 100644
--- a/lib/ansible/plugins/loader.py
+++ b/lib/ansible/plugins/loader.py
@@ -39,12 +39,6 @@ except ImportError:
Version = None
try:
- # use C version if possible for speedup
- from yaml import CSafeLoader as SafeLoader
-except ImportError:
- from yaml import SafeLoader
-
-try:
import importlib.util
imp = None
except ImportError:
@@ -61,7 +55,7 @@ def get_all_plugin_loaders():
def add_all_plugin_dirs(path):
''' add any existing plugin dirs in the path provided '''
- b_path = to_bytes(path, errors='surrogate_or_strict')
+ b_path = os.path.expanduser(to_bytes(path, errors='surrogate_or_strict'))
if os.path.isdir(b_path):
for name, obj in get_all_plugin_loaders():
if obj.subdir:
@@ -313,7 +307,7 @@ class PluginLoader:
parts = self.package.split('.')[1:]
for parent_mod in parts:
m = getattr(m, parent_mod)
- self.package_path = os.path.dirname(m.__file__)
+ self.package_path = to_text(os.path.dirname(m.__file__), errors='surrogate_or_strict')
if subdirs:
return self._all_directories(self.package_path)
return [self.package_path]
@@ -332,12 +326,15 @@ class PluginLoader:
# look in any configured plugin paths, allow one level deep for subcategories
if self.config is not None:
for path in self.config:
- path = os.path.realpath(os.path.expanduser(path))
+ path = os.path.abspath(os.path.expanduser(path))
if subdirs:
contents = glob.glob("%s/*" % path) + glob.glob("%s/*/*" % path)
for c in contents:
+ c = to_text(c, errors='surrogate_or_strict')
if os.path.isdir(c) and c not in ret:
ret.append(PluginPathContext(c, False))
+
+ path = to_text(path, errors='surrogate_or_strict')
if path not in ret:
ret.append(PluginPathContext(path, False))
@@ -479,6 +476,11 @@ class PluginLoader:
if redirect:
# FIXME: remove once this is covered in debug or whatever
display.vv("redirecting (type: {0}) {1} to {2}".format(plugin_type, fq_name, redirect))
+ # The name doing the redirection is added at the beginning of _resolve_plugin_step,
+ # but if the unqualified name is used in conjunction with the collections keyword, only
+ # the unqualified name is in the redirect list.
+ if fq_name not in plugin_load_context.redirect_list:
+ plugin_load_context.redirect_list.append(fq_name)
return plugin_load_context.redirect(redirect)
# TODO: non-FQCN case, do we support `.` prefix for current collection, assume it with no dots, require it for subdirs in current, or ?
@@ -606,7 +608,12 @@ class PluginLoader:
# 'ansible.builtin' should be handled here. This means only internal, or builtin, paths are searched.
plugin_load_context = self._find_fq_plugin(candidate_name, suffix, plugin_load_context=plugin_load_context)
- if candidate_name != plugin_load_context.original_name and candidate_name not in plugin_load_context.redirect_list:
+ # Pending redirects are added to the redirect_list at the beginning of _resolve_plugin_step.
+ # Once redirects are resolved, ensure the final FQCN is added here.
+ # e.g. 'ns.coll.module' is included rather than only 'module' if a collections list is provided:
+ # - module:
+ # collections: ['ns.coll']
+ if plugin_load_context.resolved and candidate_name not in plugin_load_context.redirect_list:
plugin_load_context.redirect_list.append(candidate_name)
if plugin_load_context.resolved or plugin_load_context.pending_redirect: # if we got an answer or need to chase down a redirect, return
@@ -659,16 +666,18 @@ class PluginLoader:
# we need to make sure we don't want to add additional directories
# (add_directory()) once we start using the iterator.
# We can use _get_paths_with_context() since add_directory() forces a cache refresh.
- for path_context in (p for p in self._get_paths_with_context() if p.path not in self._searched_paths and os.path.isdir(p.path)):
- path = path_context.path
+ for path_with_context in (p for p in self._get_paths_with_context() if p.path not in self._searched_paths and os.path.isdir(to_bytes(p.path))):
+ path = path_with_context.path
+ b_path = to_bytes(path)
display.debug('trying %s' % path)
plugin_load_context.load_attempts.append(path)
+ internal = path_with_context.internal
try:
- full_paths = (os.path.join(path, f) for f in os.listdir(path))
+ full_paths = (os.path.join(b_path, f) for f in os.listdir(b_path))
except OSError as e:
display.warning("Error accessing plugin paths: %s" % to_text(e))
- for full_path in (f for f in full_paths if os.path.isfile(f) and not f.endswith('__init__.py')):
+ for full_path in (to_native(f) for f in full_paths if os.path.isfile(f) and not f.endswith(b'__init__.py')):
full_name = os.path.basename(full_path)
# HACK: We have no way of executing python byte compiled files as ansible modules so specifically exclude them
@@ -676,15 +685,15 @@ class PluginLoader:
# For all other plugins we want .pyc and .pyo should be valid
if any(full_path.endswith(x) for x in C.MODULE_IGNORE_EXTS):
continue
-
splitname = os.path.splitext(full_name)
base_name = splitname[0]
- internal = path_context.internal
try:
extension = splitname[1]
except IndexError:
extension = ''
+ # everything downstream expects unicode
+ full_path = to_text(full_path, errors='surrogate_or_strict')
# Module found, now enter it into the caches that match this file
if base_name not in self._plugin_path_cache['']:
self._plugin_path_cache[''][base_name] = PluginPathContext(full_path, internal)
@@ -897,7 +906,7 @@ class PluginLoader:
found_in_cache = True
for i in self._get_paths():
- all_matches.extend(glob.glob(os.path.join(i, "*.py")))
+ all_matches.extend(glob.glob(to_native(os.path.join(i, "*.py"))))
loaded_modules = set()
for path in sorted(all_matches, key=os.path.basename):
@@ -967,50 +976,59 @@ class Jinja2Loader(PluginLoader):
The filter and test plugins are Jinja2 plugins encapsulated inside of our plugin format.
The way the calling code is setup, we need to do a few things differently in the all() method
+
+ We can't use the base class version because of file == plugin assumptions and dedupe logic
"""
def find_plugin(self, name, collection_list=None):
- # Nothing using Jinja2Loader use this method. We can't use the base class version because
- # we deduplicate differently than the base class
- if '.' in name:
+
+ if '.' in name:  # NOTE: this is the wrong way, use AnsibleCollectionRef.is_valid_fqcr(name) or collection_list instead
return super(Jinja2Loader, self).find_plugin(name, collection_list=collection_list)
- raise AnsibleError('No code should call find_plugin for Jinja2Loaders (Not implemented)')
+ # Nothing is currently using this method
+ raise AnsibleError('No code should call "find_plugin" for Jinja2Loaders (Not implemented)')
def get(self, name, *args, **kwargs):
- # Nothing using Jinja2Loader use this method. We can't use the base class version because
- # we deduplicate differently than the base class
- if '.' in name:
+
+ if '.' in name:  # NOTE: this is the wrong way to detect a collection, see note above for an example
return super(Jinja2Loader, self).get(name, *args, **kwargs)
- raise AnsibleError('No code should call find_plugin for Jinja2Loaders (Not implemented)')
+ # Nothing is currently using this method
+ raise AnsibleError('No code should call "get" for Jinja2Loaders (Not implemented)')
def all(self, *args, **kwargs):
"""
Differences with :meth:`PluginLoader.all`:
- * We do not deduplicate ansible plugin names. This is because we don't care about our
- plugin names, here. We care about the names of the actual jinja2 plugins which are inside
- of our plugins.
- * We reverse the order of the list of plugins compared to other PluginLoaders. This is
+ * Unlike other plugin types, file != plugin, a file can contain multiple plugins (of same type).
+ This is why we do not deduplicate ansible file names at this point, we mostly care about
+ the names of the actual jinja2 plugins which are inside of our files.
+ * We reverse the order of the list of files compared to other PluginLoaders. This is
because of how calling code chooses to sync the plugins from the list. It adds all the
- Jinja2 plugins from one of our Ansible plugins into a dict. Then it adds the Jinja2
- plugins from the next Ansible plugin, overwriting any Jinja2 plugins that had the same
+ Jinja2 plugins from one of our Ansible files into a dict. Then it adds the Jinja2
+ plugins from the next Ansible file, overwriting any Jinja2 plugins that had the same
name. This is an encapsulation violation (the PluginLoader should not know about what
calling code does with the data) but we're pushing the common code here. We'll fix
this in the future by moving more of the common code into this PluginLoader.
* We return a list. We could iterate the list instead but that's extra work for no gain because
the API receiving this doesn't care. It just needs an iterable
+ * This method will NOT fetch collection plugins, only those that would be expected under 'ansible.legacy'.
"""
- # We don't deduplicate ansible plugin names. Instead, calling code deduplicates jinja2
- # plugin names.
+ # We don't deduplicate ansible file names.
+ # Instead, calling code deduplicates jinja2 plugin names when loading each file.
kwargs['_dedupe'] = False
- # We have to instantiate a list of all plugins so that we can reverse it. We reverse it so
- # that calling code will deduplicate this correctly.
- plugins = [p for p in super(Jinja2Loader, self).all(*args, **kwargs)]
- plugins.reverse()
+ # TODO: move this to initialization and extract/dedupe plugin names in loader and offset this from
+ # caller. It would have to cache/refresh on add_directory to reevaluate plugin list and dedupe.
+ # Another option is to always prepend 'ansible.legacy' and force the collection path to
+ # load/find plugins, just need to check compatibility of that approach.
+ # This would also enable get/find_plugin for these types of plugins.
+
+ # We have to instantiate a list of all files so that we can reverse the list.
+ # We reverse it so that calling code will deduplicate this correctly.
+ files = list(super(Jinja2Loader, self).all(*args, **kwargs))
+ files.reverse()
- return plugins
+ return files
def _load_plugin_filter():
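The reversal in Jinja2Loader.all() exists because calling code folds every file's Jinja2 plugins into one dict, letting the last file it processes win; handing the files back lowest-precedence-first keeps the first-found plugin authoritative. The consuming pattern, stripped down to plain dicts:

    def collect_jinja2_plugins(files_in_search_order):
        # files_in_search_order: the earliest path has the highest precedence
        merged = {}
        # iterate lowest-precedence first so higher-precedence entries overwrite them
        for plugin_file in reversed(files_in_search_order):
            merged.update(plugin_file['filters'])
        return merged

    files = [{'filters': {'split': 'first hit'}}, {'filters': {'split': 'later hit', 'other': 1}}]
    assert collect_jinja2_plugins(files) == {'split': 'first hit', 'other': 1}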
diff --git a/lib/ansible/plugins/lookup/config.py b/lib/ansible/plugins/lookup/config.py
index ceea80c3..245ea67c 100644
--- a/lib/ansible/plugins/lookup/config.py
+++ b/lib/ansible/plugins/lookup/config.py
@@ -4,7 +4,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
- lookup: config
+ name: config
author: Ansible Core Team
version_added: "2.5"
short_description: Lookup current Ansible configuration values
diff --git a/lib/ansible/plugins/lookup/csvfile.py b/lib/ansible/plugins/lookup/csvfile.py
index af76ed1b..272b4a77 100644
--- a/lib/ansible/plugins/lookup/csvfile.py
+++ b/lib/ansible/plugins/lookup/csvfile.py
@@ -5,22 +5,23 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
- lookup: csvfile
+ name: csvfile
author: Jan-Piet Mens (@jpmens) <jpmens(at)gmail.com>
version_added: "1.5"
short_description: read data from a TSV or CSV file
description:
- The csvfile lookup reads the contents of a file in CSV (comma-separated value) format.
- The lookup looks for the row where the first column matches keyname, and returns the value in the second column, unless a different column is specified.
+ The lookup looks for the row where the first column matches keyname (which can be multiple words)
+ and returns the value in the C(col) column (default 1, which indexed from 0 means the second column in the file).
options:
col:
- description: column to return (0 index).
+ description: column to return (0 indexed).
default: "1"
default:
description: what to return if the value is not found in the file.
default: ''
delimiter:
- description: field separator in the file, for a tab you can specify "TAB" or "t".
+ description: field separator in the file, for a tab you can specify C(TAB) or C(\\t).
default: TAB
file:
description: name of the CSV/TSV file to open.
@@ -31,6 +32,10 @@ DOCUMENTATION = """
version_added: "2.1"
notes:
- The default is for TSV files (tab delimited) not CSV (comma delimited) ... yes the name is misleading.
+ - As of version 2.11, the search parameter (text that must match the first column of the file) and filename parameter can be multi-word.
+ - For historical reasons, in the search keyname, quotes are treated
+ literally and cannot be used around the string unless they appear
+ (escaped as required) in the first column of the file you are parsing.
"""
EXAMPLES = """
@@ -64,6 +69,7 @@ import codecs
import csv
from ansible.errors import AnsibleError, AnsibleAssertionError
+from ansible.parsing.splitter import parse_kv
from ansible.plugins.lookup import LookupBase
from ansible.module_utils.six import PY2
from ansible.module_utils._text import to_bytes, to_native, to_text
@@ -131,8 +137,12 @@ class LookupModule(LookupBase):
ret = []
for term in terms:
- params = term.split()
- key = params[0]
+ kv = parse_kv(term)
+
+ if '_raw_params' not in kv:
+ raise AnsibleError('Search key is required but was not found')
+
+ key = kv['_raw_params']
paramvals = {
'col': "1", # column to return
@@ -144,8 +154,9 @@ class LookupModule(LookupBase):
# parameters specified?
try:
- for param in params[1:]:
- name, value = param.split('=')
+ for name, value in kv.items():
+ if name == '_raw_params':
+ continue
if name not in paramvals:
raise AnsibleAssertionError('%s not in paramvals' % name)
paramvals[name] = value
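Switching from str.split() to parse_kv() is what allows multi-word search keys and filenames: everything that is not a name=value pair ends up under '_raw_params'. Roughly (parse_kv is the real helper from ansible.parsing.splitter; the term below is made up):

    from ansible.parsing.splitter import parse_kv

    kv = parse_kv('Jane Doe file=people.csv delimiter=, col=2')
    # roughly: {'_raw_params': 'Jane Doe', 'file': 'people.csv', 'delimiter': ',', 'col': '2'}
    key = kv.pop('_raw_params')    # the (possibly multi-word) search key
    params = kv                    # the remaining name=value pairs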
diff --git a/lib/ansible/plugins/lookup/dict.py b/lib/ansible/plugins/lookup/dict.py
index 95480a33..5a83d9e8 100644
--- a/lib/ansible/plugins/lookup/dict.py
+++ b/lib/ansible/plugins/lookup/dict.py
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
- lookup: dict
+ name: dict
version_added: "1.5"
short_description: returns key/value pair items from dictionaries
description:
diff --git a/lib/ansible/plugins/lookup/env.py b/lib/ansible/plugins/lookup/env.py
index bb0fae5f..154e7553 100644
--- a/lib/ansible/plugins/lookup/env.py
+++ b/lib/ansible/plugins/lookup/env.py
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
- lookup: env
+ name: env
author: Jan-Piet Mens (@jpmens) <jpmens(at)gmail.com>
version_added: "0.9"
short_description: Read the value of environment variables
diff --git a/lib/ansible/plugins/lookup/file.py b/lib/ansible/plugins/lookup/file.py
index 7b426a6c..04ddc4b1 100644
--- a/lib/ansible/plugins/lookup/file.py
+++ b/lib/ansible/plugins/lookup/file.py
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
- lookup: file
+ name: file
author: Daniel Hokka Zakrisson (!UNKNOWN) <daniel@hozac.com>
version_added: "0.9"
short_description: read file contents
diff --git a/lib/ansible/plugins/lookup/fileglob.py b/lib/ansible/plugins/lookup/fileglob.py
index aa5d7d34..e9ba25af 100644
--- a/lib/ansible/plugins/lookup/fileglob.py
+++ b/lib/ansible/plugins/lookup/fileglob.py
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
- lookup: fileglob
+ name: fileglob
author: Michael DeHaan
version_added: "1.4"
short_description: list files matching a pattern
@@ -76,7 +76,8 @@ class LookupModule(LookupBase):
for dwimmed_path in found_paths:
if dwimmed_path:
globbed = glob.glob(to_bytes(os.path.join(dwimmed_path, term_file), errors='surrogate_or_strict'))
- ret.extend(to_text(g, errors='surrogate_or_strict') for g in globbed if os.path.isfile(g))
- if ret:
+ term_results = [to_text(g, errors='surrogate_or_strict') for g in globbed if os.path.isfile(g)]
+ if term_results:
+ ret.extend(term_results)
break
return ret
diff --git a/lib/ansible/plugins/lookup/first_found.py b/lib/ansible/plugins/lookup/first_found.py
index 54bc6849..929a905f 100644
--- a/lib/ansible/plugins/lookup/first_found.py
+++ b/lib/ansible/plugins/lookup/first_found.py
@@ -5,26 +5,30 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
- lookup: first_found
+ name: first_found
author: Seth Vidal (!UNKNOWN) <skvidal@fedoraproject.org>
version_added: historical
short_description: return first file found from list
description:
- - this lookup checks a list of files and paths and returns the full path to the first combination found.
+ - This lookup checks a list of files and paths and returns the full path to the first combination found.
      - As with all lookups, when fed relative paths it will try to use the current task's location first and go up the chain
- to the containing role/play/include/etc's location.
+ to the containing locations of role / play / include and so on.
- The list of files has precedence over the paths searched.
- i.e, A task in a role has a 'file1' in the play's relative path, this will be used, 'file2' in role's relative path will not.
+ For example, A task in a role has a 'file1' in the play's relative path, this will be used, 'file2' in role's relative path will not.
- Either a list of files C(_terms) or a key `files` with a list of files is required for this plugin to operate.
notes:
- This lookup can be used in 'dual mode', either passing a list of file names or a dictionary that has C(files) and C(paths).
options:
_terms:
- description: list of file names
+ description: A list of file names.
files:
- description: list of file names
+ description: A list of file names.
+ type: list
+ default: []
paths:
- description: list of paths in which to look for the files
+ description: A list of paths in which to look for the files.
+ type: list
+ default: []
skip:
type: boolean
default: False
@@ -33,42 +37,45 @@ DOCUMENTATION = """
EXAMPLES = """
- name: show first existing file or ignore if none do
- debug: msg={{lookup('first_found', findme, errors='ignore')}}
+ debug:
+ msg: "{{ lookup('first_found', findme, errors='ignore') }}"
vars:
findme:
- - "/path/to/foo.txt"
- - "bar.txt" # will be looked in files/ dir relative to role and/or play
- - "/path/to/biz.txt"
+ - /path/to/foo.txt
+ - bar.txt # will be looked in files/ dir relative to role and/or play
+ - /path/to/biz.txt
-- name: |
- include tasks only if files exist. Note the use of query() to return
- a blank list for the loop if no files are found.
- import_tasks: '{{ item }}'
+- name: include tasks only if files exist.
+ include_tasks:
+ file: "{{ query('first_found', params) }}"
vars:
params:
files:
- path/tasks.yaml
- path/other_tasks.yaml
- loop: "{{ query('first_found', params, errors='ignore') }}"
- name: |
copy first existing file found to /some/file,
looking in relative directories from where the task is defined and
including any play objects that contain it
- copy: src={{lookup('first_found', findme)}} dest=/some/file
+ copy:
+ src: "{{ lookup('first_found', findme) }}"
+ dest: /some/file
vars:
findme:
- foo
- - "{{inventory_hostname}}"
+ - "{{ inventory_hostname }}"
- bar
- name: same copy but specific paths
- copy: src={{lookup('first_found', params)}} dest=/some/file
+ copy:
+ src: "{{ lookup('first_found', params) }}"
+ dest: /some/file
vars:
params:
files:
- foo
- - "{{inventory_hostname}}"
+ - "{{ inventory_hostname }}"
- bar
paths:
- /tmp/production
@@ -76,7 +83,7 @@ EXAMPLES = """
- name: INTERFACES | Create Ansible header for /etc/network/interfaces
template:
- src: "{{ lookup('first_found', findme)}}"
+ src: "{{ lookup('first_found', findme) }}"
dest: "/etc/foo.conf"
vars:
findme:
@@ -84,12 +91,12 @@ EXAMPLES = """
- "default_foo.conf"
- name: read vars from first file found, use 'vars/' relative subdir
- include_vars: "{{lookup('first_found', params)}}"
+ include_vars: "{{ lookup('first_found', params) }}"
vars:
params:
files:
- - '{{ansible_distribution}}.yml'
- - '{{ansible_os_family}}.yml'
+ - '{{ ansible_distribution }}.yml'
+ - '{{ ansible_os_family }}.yml'
- default.yml
paths:
- 'vars'
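
To make the "files take precedence over the paths searched" rule from the documentation above concrete, here is a rough sketch of the implied search order; it is not the plugin source, just the loop nesting the description amounts to:

    import os

    def first_found(files, paths):
        # File-name order dominates: the first name that exists in *any* of the
        # paths wins, before later names are considered at all.
        for name in files:
            for path in paths:
                candidate = os.path.join(path, name)
                if os.path.isfile(candidate):
                    return candidate
        return None

    # Hypothetical call: first_found(['Ubuntu.yml', 'default.yml'], ['vars', '/etc/app'])
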
diff --git a/lib/ansible/plugins/lookup/indexed_items.py b/lib/ansible/plugins/lookup/indexed_items.py
index 967e3999..7f77b6f8 100644
--- a/lib/ansible/plugins/lookup/indexed_items.py
+++ b/lib/ansible/plugins/lookup/indexed_items.py
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
- lookup: indexed_items
+ name: indexed_items
author: Michael DeHaan
version_added: "1.3"
short_description: rewrites lists to return 'indexed items'
diff --git a/lib/ansible/plugins/lookup/ini.py b/lib/ansible/plugins/lookup/ini.py
index b53468b3..224aa1de 100644
--- a/lib/ansible/plugins/lookup/ini.py
+++ b/lib/ansible/plugins/lookup/ini.py
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
- lookup: ini
+ name: ini
author: Yannig Perre (!UNKNOWN) <yannig.perre(at)gmail.com>
version_added: "2.0"
short_description: read data from a ini file
@@ -15,7 +15,10 @@ DOCUMENTATION = """
- "You can also read a property file which - in this case - does not contain section."
options:
_terms:
- description: The key(s) to look up
+ description:
+        The key(s) to look up. On Python 2, key names are case B(insensitive);
+        on Python 3, key names are case B(sensitive). Duplicate key names found
+        in a file will result in an error.
required: True
type:
description: Type of the file. 'properties' refers to the Java properties files.
@@ -63,9 +66,9 @@ import os
import re
from io import StringIO
-from ansible.errors import AnsibleError, AnsibleAssertionError
+from ansible.errors import AnsibleError, AnsibleAssertionError, AnsibleLookupError
from ansible.module_utils.six.moves import configparser
-from ansible.module_utils._text import to_bytes, to_text
+from ansible.module_utils._text import to_text, to_native
from ansible.module_utils.common._collections_compat import MutableSequence
from ansible.plugins.lookup import LookupBase
@@ -153,9 +156,15 @@ class LookupModule(LookupBase):
config.write(contents)
config.seek(0, os.SEEK_SET)
- self.cp.readfp(config)
- var = self.get_value(key, paramvals['section'],
- paramvals['default'], paramvals['re'])
+ try:
+ self.cp.readfp(config)
+ except configparser.DuplicateOptionError as doe:
+ raise AnsibleLookupError("Duplicate option in '{file}': {error}".format(file=paramvals['file'], error=to_native(doe)))
+
+ try:
+ var = self.get_value(key, paramvals['section'], paramvals['default'], paramvals['re'])
+ except configparser.NoSectionError:
+ raise AnsibleLookupError("No section '{section}' in {file}".format(section=paramvals['section'], file=paramvals['file']))
if var is not None:
if isinstance(var, MutableSequence):
for v in var:
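
The new error handling in the ini lookup above leans on Python 3's configparser raising these exceptions on its own; a small reproduction outside Ansible (the ini contents are invented):

    import configparser

    dup_text = "[db]\nuser = alice\nuser = bob\n"

    cp = configparser.ConfigParser()            # strict=True by default on Python 3
    try:
        cp.read_string(dup_text)
    except configparser.DuplicateOptionError as exc:
        print("duplicate key:", exc)

    cp = configparser.ConfigParser()
    cp.read_string("[db]\nuser = alice\n")
    try:
        cp.get("web", "user")                   # the 'web' section does not exist
    except configparser.NoSectionError as exc:
        print("missing section:", exc)
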
diff --git a/lib/ansible/plugins/lookup/inventory_hostnames.py b/lib/ansible/plugins/lookup/inventory_hostnames.py
index a9f521cc..a3cff92e 100644
--- a/lib/ansible/plugins/lookup/inventory_hostnames.py
+++ b/lib/ansible/plugins/lookup/inventory_hostnames.py
@@ -7,7 +7,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
- lookup: inventory_hostnames
+ name: inventory_hostnames
author:
- Michael DeHaan
- Steven Dossett (!UNKNOWN) <sdossett@panath.com>
@@ -34,41 +34,20 @@ RETURN = """
type: list
"""
-from ansible.inventory.manager import split_host_pattern, order_patterns
+from ansible.errors import AnsibleError
+from ansible.inventory.manager import InventoryManager
from ansible.plugins.lookup import LookupBase
-from ansible.utils.helpers import deduplicate_list
class LookupModule(LookupBase):
-
- def get_hosts(self, variables, pattern):
- hosts = []
- if pattern[0] in ('!', '&'):
- obj = pattern[1:]
- else:
- obj = pattern
-
- if obj in variables['groups']:
- hosts = variables['groups'][obj]
- elif obj in variables['groups']['all']:
- hosts = [obj]
- return hosts
-
def run(self, terms, variables=None, **kwargs):
-
- host_list = []
-
- for term in terms:
- patterns = order_patterns(split_host_pattern(term))
-
- for p in patterns:
- that = self.get_hosts(variables, p)
- if p.startswith("!"):
- host_list = [h for h in host_list if h not in that]
- elif p.startswith("&"):
- host_list = [h for h in host_list if h in that]
- else:
- host_list.extend(that)
-
- # return unique list
- return deduplicate_list(host_list)
+ manager = InventoryManager(self._loader, parse=False)
+ for group, hosts in variables['groups'].items():
+ manager.add_group(group)
+ for host in hosts:
+ manager.add_host(host, group=group)
+
+ try:
+ return [h.name for h in manager.get_hosts(pattern=terms)]
+ except AnsibleError:
+ return []
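
The inventory_hostnames rewrite above hands pattern handling ('!', '&', globs, ranges) to InventoryManager instead of reimplementing it. A rough sketch of the same idea against an in-memory inventory; it assumes an Ansible installation is importable, and the group and host names are made up:

    from ansible.inventory.manager import InventoryManager
    from ansible.parsing.dataloader import DataLoader

    groups = {'web': ['web1', 'web2'], 'db': ['db1']}

    manager = InventoryManager(DataLoader(), parse=False)
    for group, hosts in groups.items():
        manager.add_group(group)
        for host in hosts:
            manager.add_host(host, group=group)

    # Standard Ansible pattern syntax: every 'web' host except web2.
    print([h.name for h in manager.get_hosts(pattern='web:!web2')])   # ['web1']
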
diff --git a/lib/ansible/plugins/lookup/items.py b/lib/ansible/plugins/lookup/items.py
index 3410e746..9cb27030 100644
--- a/lib/ansible/plugins/lookup/items.py
+++ b/lib/ansible/plugins/lookup/items.py
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
- lookup: items
+ name: items
author: Michael DeHaan
version_added: historical
short_description: list of items
diff --git a/lib/ansible/plugins/lookup/lines.py b/lib/ansible/plugins/lookup/lines.py
index b7fb875b..fa578e62 100644
--- a/lib/ansible/plugins/lookup/lines.py
+++ b/lib/ansible/plugins/lookup/lines.py
@@ -6,7 +6,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
- lookup: lines
+ name: lines
author: Daniel Hokka Zakrisson (!UNKNOWN) <daniel@hozac.com>
version_added: "0.9"
short_description: read lines from command
diff --git a/lib/ansible/plugins/lookup/list.py b/lib/ansible/plugins/lookup/list.py
index e57cdd64..8d6d900a 100644
--- a/lib/ansible/plugins/lookup/list.py
+++ b/lib/ansible/plugins/lookup/list.py
@@ -7,7 +7,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
- lookup: list
+ name: list
author: Ansible Core Team
version_added: "2.0"
short_description: simply returns what it is given.
diff --git a/lib/ansible/plugins/lookup/nested.py b/lib/ansible/plugins/lookup/nested.py
index 45304b7b..e38a0f5c 100644
--- a/lib/ansible/plugins/lookup/nested.py
+++ b/lib/ansible/plugins/lookup/nested.py
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
- lookup: nested
+ name: nested
version_added: "1.1"
short_description: composes a list with nested elements of other lists
description:
diff --git a/lib/ansible/plugins/lookup/password.py b/lib/ansible/plugins/lookup/password.py
index 81b5d500..c88edfe0 100644
--- a/lib/ansible/plugins/lookup/password.py
+++ b/lib/ansible/plugins/lookup/password.py
@@ -7,7 +7,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
- lookup: password
+ name: password
version_added: "1.1"
author:
- Daniel Hokka Zakrisson (!UNKNOWN) <daniel@hozac.com>
@@ -28,7 +28,7 @@ DOCUMENTATION = """
required: True
encrypt:
description:
- - Which hash scheme to encrypt the returning password, should be one hash scheme from C(passlib.hash; md5_crypt, bcrypt, sha256_crypt, sha512_crypt)
+      - Which hash scheme to encrypt the returning password with; it should be one hash scheme from C(passlib.hash; md5_crypt, bcrypt, sha256_crypt, sha512_crypt).
- If not provided, the password will be returned in plain text.
- Note that the password is always stored as plain text, only the returning password is encrypted.
- Encrypt also forces saving the salt value for idempotence.
@@ -37,8 +37,12 @@ DOCUMENTATION = """
version_added: "1.4"
description:
- Define comma separated list of names that compose a custom character set in the generated passwords.
- - 'By default generated passwords contain a random mix of upper and lowercase ASCII letters, the numbers 0-9 and punctuation (". , : - _").'
- - "They can be either parts of Python's string module attributes (ascii_letters,digits, etc) or are used literally ( :, -)."
+ - 'By default generated passwords contain a random mix of upper and lowercase ASCII letters, the numbers 0-9, and punctuation (". , : - _").'
+ - "They can be either parts of Python's string module attributes or represented literally ( :, -)."
+      - "Though the exact attributes of Python's string module can vary by Python version, valid values for both major releases include:
+        'ascii_lowercase', 'ascii_uppercase', 'digits', 'hexdigits', 'octdigits', 'printable', 'punctuation' and 'whitespace'."
+      - Be aware that Python's 'hexdigits' includes both lower- and upper-case versions of a-f, so it is not a good choice; it doubles
+        the chances of those characters on systems that do not distinguish case, distorting the expected entropy.
- "To enter comma use two commas ',,' somewhere - preferably at the end. Quotes and double quotes are not supported."
type: string
length:
@@ -82,8 +86,12 @@ EXAMPLES = """
- name: create a mysql user with a random password using many different char sets
mysql_user:
name: "{{ client }}"
- password: "{{ lookup('password', '/tmp/passwordfile chars=ascii_letters,digits,hexdigits,punctuation') }}"
+ password: "{{ lookup('password', '/tmp/passwordfile chars=ascii_letters,digits,punctuation') }}"
priv: "{{ client }}_{{ tier }}_{{ role }}.*:ALL"
+
+- name: create lowercase 8 character name for Kubernetes pod name
+ set_fact:
+ random_pod_name: "web-{{ lookup('password', '/dev/null chars=ascii_lowercase,digits length=8') }}"
"""
RETURN = """
@@ -104,7 +112,7 @@ from ansible.errors import AnsibleError, AnsibleAssertionError
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.parsing.splitter import parse_kv
from ansible.plugins.lookup import LookupBase
-from ansible.utils.encrypt import do_encrypt, random_password, random_salt
+from ansible.utils.encrypt import BaseHash, do_encrypt, random_password, random_salt
from ansible.utils.path import makedirs_safe
@@ -322,20 +330,24 @@ class LookupModule(LookupBase):
else:
plaintext_password, salt = _parse_content(content)
- if params['encrypt'] and not salt:
+ encrypt = params['encrypt']
+ if encrypt and not salt:
changed = True
- salt = random_salt()
+ try:
+ salt = random_salt(BaseHash.algorithms[encrypt].salt_size)
+ except KeyError:
+ salt = random_salt()
if changed and b_path != to_bytes('/dev/null'):
- content = _format_content(plaintext_password, salt, encrypt=params['encrypt'])
+ content = _format_content(plaintext_password, salt, encrypt=encrypt)
_write_password_file(b_path, content)
if first_process:
# let other processes continue
_release_lock(lockfile)
- if params['encrypt']:
- password = do_encrypt(plaintext_password, params['encrypt'], salt=salt)
+ if encrypt:
+ password = do_encrypt(plaintext_password, encrypt, salt=salt)
ret.append(password)
else:
ret.append(plaintext_password)
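
The salt change in the password lookup above picks a salt length appropriate to the requested hash scheme and falls back to a generic salt for unknown schemes. A minimal sketch of that lookup-with-fallback pattern; the size table here is illustrative and not the real BaseHash registry:

    import secrets
    import string

    # Illustrative sizes only; the real per-scheme values live on ansible.utils.encrypt.BaseHash.
    SALT_SIZES = {'bcrypt': 22, 'sha256_crypt': 16, 'sha512_crypt': 16}
    DEFAULT_SALT_SIZE = 8

    def random_salt(length):
        chars = string.ascii_letters + string.digits + './'
        return ''.join(secrets.choice(chars) for _ in range(length))

    def salt_for(scheme):
        try:
            size = SALT_SIZES[scheme]        # KeyError for schemes we do not know about
        except KeyError:
            size = DEFAULT_SALT_SIZE
        return random_salt(size)

    print(len(salt_for('bcrypt')))      # 22
    print(len(salt_for('md5_crypt')))   # 8, the generic fallback
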
diff --git a/lib/ansible/plugins/lookup/pipe.py b/lib/ansible/plugins/lookup/pipe.py
index a640a0cf..d6bce2cd 100644
--- a/lib/ansible/plugins/lookup/pipe.py
+++ b/lib/ansible/plugins/lookup/pipe.py
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r"""
- lookup: pipe
+ name: pipe
author: Daniel Hokka Zakrisson (!UNKNOWN) <daniel@hozac.com>
version_added: "0.9"
short_description: read output from a command
diff --git a/lib/ansible/plugins/lookup/random_choice.py b/lib/ansible/plugins/lookup/random_choice.py
index 348a41d6..24aea9ac 100644
--- a/lib/ansible/plugins/lookup/random_choice.py
+++ b/lib/ansible/plugins/lookup/random_choice.py
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
- lookup: random_choice
+ name: random_choice
author: Michael DeHaan
version_added: "1.1"
short_description: return random element from list
diff --git a/lib/ansible/plugins/lookup/sequence.py b/lib/ansible/plugins/lookup/sequence.py
index ddf65676..e2ea2194 100644
--- a/lib/ansible/plugins/lookup/sequence.py
+++ b/lib/ansible/plugins/lookup/sequence.py
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
- lookup: sequence
+ name: sequence
author: Jayson Vantuyl (!UNKNOWN) <jayson@aggressive.ly>
version_added: "1.0"
short_description: generate a list based on a number sequence
@@ -58,8 +58,9 @@ EXAMPLES = """
with_sequence: count=4
- name: the final countdown
- debug: msg={{item}} seconds to detonation
- with_sequence: end=0 start=10
+ debug:
+ msg: "{{item}} seconds to detonation"
+ with_sequence: start=10 end=0 stride=-1
- name: Use of variable
debug:
diff --git a/lib/ansible/plugins/lookup/subelements.py b/lib/ansible/plugins/lookup/subelements.py
index b05856bf..27350b08 100644
--- a/lib/ansible/plugins/lookup/subelements.py
+++ b/lib/ansible/plugins/lookup/subelements.py
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
- lookup: subelements
+ name: subelements
author: Serge van Ginderachter (!UNKNOWN) <serge@vanginderachter.be>
version_added: "1.4"
short_description: traverse nested key from a list of dictionaries
diff --git a/lib/ansible/plugins/lookup/template.py b/lib/ansible/plugins/lookup/template.py
index dd4a2749..f1cbe85a 100644
--- a/lib/ansible/plugins/lookup/template.py
+++ b/lib/ansible/plugins/lookup/template.py
@@ -6,7 +6,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
- lookup: template
+ name: template
author: Michael DeHaan
version_added: "0.9"
short_description: retrieve contents of file after templating with Jinja2
@@ -17,7 +17,9 @@ DOCUMENTATION = """
description: list of files to template
convert_data:
type: bool
- description: whether to convert YAML into data. If False, strings that are YAML will be left untouched.
+ description:
+ - Whether to convert YAML into data. If False, strings that are YAML will be left untouched.
+ - Mutually exclusive with the jinja2_native option.
variable_start_string:
description: The string marking the beginning of a print statement.
default: '{{'
@@ -28,6 +30,16 @@ DOCUMENTATION = """
default: '}}'
version_added: '2.8'
type: str
+ jinja2_native:
+ description:
+ - Controls whether to use Jinja2 native types.
+ - It is off by default even if global jinja2_native is True.
+ - Has no effect if global jinja2_native is False.
+      - This offers more flexibility than the template module, which does not use Jinja2 native types at all.
+ - Mutually exclusive with the convert_data option.
+ default: False
+ version_added: '2.11'
+ type: bool
"""
EXAMPLES = """
@@ -53,23 +65,32 @@ import os
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.module_utils._text import to_bytes, to_text
-from ansible.template import generate_ansible_template_vars
+from ansible.template import generate_ansible_template_vars, AnsibleEnvironment, USE_JINJA2_NATIVE
from ansible.utils.display import Display
+if USE_JINJA2_NATIVE:
+ from ansible.utils.native_jinja import NativeJinjaText
+
+
display = Display()
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
-
convert_data_p = kwargs.get('convert_data', True)
lookup_template_vars = kwargs.get('template_vars', {})
+ jinja2_native = kwargs.get('jinja2_native', False)
ret = []
variable_start_string = kwargs.get('variable_start_string', None)
variable_end_string = kwargs.get('variable_end_string', None)
+ if USE_JINJA2_NATIVE and not jinja2_native:
+ templar = self._templar.copy_with_new_env(environment_class=AnsibleEnvironment)
+ else:
+ templar = self._templar
+
for term in terms:
display.debug("File lookup term: %s" % term)
@@ -97,15 +118,19 @@ class LookupModule(LookupBase):
# plus anything passed to the lookup with the template_vars=
# argument.
vars = deepcopy(variables)
- vars.update(generate_ansible_template_vars(lookupfile))
+ vars.update(generate_ansible_template_vars(term, lookupfile))
vars.update(lookup_template_vars)
- # do the templating
- with self._templar.set_temporary_context(variable_start_string=variable_start_string,
- variable_end_string=variable_end_string,
- available_variables=vars, searchpath=searchpath):
- res = self._templar.template(template_data, preserve_trailing_newlines=True,
- convert_data=convert_data_p, escape_backslashes=False)
+ with templar.set_temporary_context(variable_start_string=variable_start_string,
+ variable_end_string=variable_end_string,
+ available_variables=vars, searchpath=searchpath):
+ res = templar.template(template_data, preserve_trailing_newlines=True,
+ convert_data=convert_data_p, escape_backslashes=False)
+
+ if USE_JINJA2_NATIVE and not jinja2_native:
+ # jinja2_native is true globally but off for the lookup, we need this text
+ # not to be processed by literal_eval anywhere in Ansible
+ res = NativeJinjaText(res)
ret.append(res)
else:
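
The new jinja2_native option (and the NativeJinjaText wrapping above) exists because Jinja2's native mode returns Python objects where the default environment returns strings. A quick illustration with plain Jinja2, no Ansible involved:

    from jinja2 import Environment
    from jinja2.nativetypes import NativeEnvironment   # requires Jinja2 >= 2.10

    TEMPLATE = "{{ [1, 2] + [3] }}"

    print(repr(Environment().from_string(TEMPLATE).render()))        # '[1, 2, 3]'  (a str)
    print(repr(NativeEnvironment().from_string(TEMPLATE).render()))  # [1, 2, 3]    (a list)
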
diff --git a/lib/ansible/plugins/lookup/together.py b/lib/ansible/plugins/lookup/together.py
index a20e205a..8ba62f14 100644
--- a/lib/ansible/plugins/lookup/together.py
+++ b/lib/ansible/plugins/lookup/together.py
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
- lookup: together
+ name: together
author: Bradley Young (!UNKNOWN) <young.bradley@gmail.com>
version_added: '1.3'
short_description: merges lists into synchronized list
diff --git a/lib/ansible/plugins/lookup/unvault.py b/lib/ansible/plugins/lookup/unvault.py
index 16c1d71d..3712ba5b 100644
--- a/lib/ansible/plugins/lookup/unvault.py
+++ b/lib/ansible/plugins/lookup/unvault.py
@@ -4,7 +4,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
- lookup: unvault
+ name: unvault
author: Ansible Core Team
version_added: "2.10"
short_description: read vaulted file(s) contents
diff --git a/lib/ansible/plugins/lookup/url.py b/lib/ansible/plugins/lookup/url.py
index 1c24ebf8..026590f3 100644
--- a/lib/ansible/plugins/lookup/url.py
+++ b/lib/ansible/plugins/lookup/url.py
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
-lookup: url
+name: url
author: Brian Coca (@bcoca)
version_added: "1.9"
short_description: return contents from URL
@@ -64,9 +64,10 @@ options:
- section: url_lookup
key: timeout
http_agent:
- description: User-Agent to use in the request
+ description: User-Agent to use in the request. The default was changed in 2.11 to C(ansible-httpget).
type: string
version_added: "2.10"
+ default: ansible-httpget
vars:
- name: ansible_lookup_url_agent
env:
@@ -99,7 +100,9 @@ options:
- section: url_lookup
key: follow_redirects
use_gssapi:
- description: Use GSSAPI handler of requests
+ description:
+        - Use GSSAPI handler of requests.
+ - As of Ansible 2.11, GSSAPI credentials can be specified with I(username) and I(password).
type: boolean
version_added: "2.10"
default: False
diff --git a/lib/ansible/plugins/lookup/varnames.py b/lib/ansible/plugins/lookup/varnames.py
index 32862951..6a3def37 100644
--- a/lib/ansible/plugins/lookup/varnames.py
+++ b/lib/ansible/plugins/lookup/varnames.py
@@ -4,7 +4,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
- lookup: varnames
+ name: varnames
author: Ansible Core Team
version_added: "2.8"
short_description: Lookup matching variable names
@@ -66,7 +66,7 @@ class LookupModule(LookupBase):
for term in terms:
if not isinstance(term, string_types):
- raise AnsibleError('Invalid setting identifier, "%s" is not a string, its a %s' % (term, type(term)))
+ raise AnsibleError('Invalid setting identifier, "%s" is not a string, it is a %s' % (term, type(term)))
try:
name = re.compile(term)
diff --git a/lib/ansible/plugins/lookup/vars.py b/lib/ansible/plugins/lookup/vars.py
index da3848ba..9e147352 100644
--- a/lib/ansible/plugins/lookup/vars.py
+++ b/lib/ansible/plugins/lookup/vars.py
@@ -4,7 +4,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
- lookup: vars
+ name: vars
author: Ansible Core Team
version_added: "2.5"
short_description: Lookup templated value of variables
diff --git a/lib/ansible/plugins/netconf/__init__.py b/lib/ansible/plugins/netconf/__init__.py
index d25f4d35..95442e60 100644
--- a/lib/ansible/plugins/netconf/__init__.py
+++ b/lib/ansible/plugins/netconf/__init__.py
@@ -24,7 +24,7 @@ from functools import wraps
from ansible.errors import AnsibleError
from ansible.plugins import AnsiblePlugin
-from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils._text import to_native
from ansible.module_utils.basic import missing_required_lib
try:
@@ -269,7 +269,6 @@ class NetconfBase(AnsiblePlugin):
and set a token on the ongoing confirmed commit
:return: Returns xml string containing the RPC response received from remote host
"""
- timeout = to_text(timeout, errors='surrogate_or_strict')
resp = self.m.commit(confirmed=confirmed, timeout=timeout, persist=persist)
return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
diff --git a/lib/ansible/plugins/shell/__init__.py b/lib/ansible/plugins/shell/__init__.py
index 41c24441..8aeab292 100644
--- a/lib/ansible/plugins/shell/__init__.py
+++ b/lib/ansible/plugins/shell/__init__.py
@@ -107,6 +107,13 @@ class ShellBase(AnsiblePlugin):
return ' '.join(cmd)
+ def chgrp(self, paths, group):
+ cmd = ['chgrp', group]
+ cmd.extend(paths)
+ cmd = [shlex_quote(c) for c in cmd]
+
+ return ' '.join(cmd)
+
def set_user_facl(self, paths, user, mode):
"""Only sets acls for users as that's really all we need"""
cmd = ['setfacl', '-m', 'u:%s:%s' % (user, mode)]
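
The chgrp helper added to ShellBase above builds a shell-quoted command string. Roughly the same construction in standalone Python, using shlex.quote as a stand-in for the shlex_quote helper the plugin imports:

    from shlex import quote

    def chgrp(paths, group):
        # Quote every element so paths with spaces or shell metacharacters survive.
        cmd = ['chgrp', group]
        cmd.extend(paths)
        return ' '.join(quote(c) for c in cmd)

    print(chgrp(['/tmp/a file', '/tmp/b'], 'devs'))
    # chgrp devs '/tmp/a file' /tmp/b
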
diff --git a/lib/ansible/plugins/shell/cmd.py b/lib/ansible/plugins/shell/cmd.py
index d83aa115..e73c6719 100644
--- a/lib/ansible/plugins/shell/cmd.py
+++ b/lib/ansible/plugins/shell/cmd.py
@@ -5,7 +5,6 @@ __metaclass__ = type
DOCUMENTATION = '''
name: cmd
-plugin_type: shell
version_added: '2.8'
short_description: Windows Command Prompt
description:
diff --git a/lib/ansible/plugins/shell/powershell.py b/lib/ansible/plugins/shell/powershell.py
index 64ea5824..11d1cfcc 100644
--- a/lib/ansible/plugins/shell/powershell.py
+++ b/lib/ansible/plugins/shell/powershell.py
@@ -6,7 +6,6 @@ __metaclass__ = type
DOCUMENTATION = '''
name: powershell
-plugin_type: shell
version_added: historical
short_description: Windows PowerShell
description:
diff --git a/lib/ansible/plugins/shell/sh.py b/lib/ansible/plugins/shell/sh.py
index 76a386f7..5b13a015 100644
--- a/lib/ansible/plugins/shell/sh.py
+++ b/lib/ansible/plugins/shell/sh.py
@@ -6,7 +6,6 @@ __metaclass__ = type
DOCUMENTATION = '''
name: sh
-plugin_type: shell
short_description: "POSIX shell (/bin/sh)"
version_added: historical
description:
diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py
index e60b9431..46b3885c 100644
--- a/lib/ansible/plugins/strategy/__init__.py
+++ b/lib/ansible/plugins/strategy/__init__.py
@@ -37,6 +37,7 @@ from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleParserError
from ansible.executor import action_write_locks
from ansible.executor.process.worker import WorkerProcess
from ansible.executor.task_result import TaskResult
+from ansible.executor.task_queue_manager import CallbackSend
from ansible.module_utils.six.moves import queue as Queue
from ansible.module_utils.six import iteritems, itervalues, string_types
from ansible.module_utils._text import to_text
@@ -49,6 +50,7 @@ from ansible.playbook.task_include import TaskInclude
from ansible.plugins import loader as plugin_loader
from ansible.template import Templar
from ansible.utils.display import Display
+from ansible.utils.unsafe_proxy import wrap_var
from ansible.utils.vars import combine_vars
from ansible.vars.clean import strip_internal_keys, module_response_deepcopy
@@ -67,14 +69,6 @@ class StrategySentinel:
pass
-def SharedPluginLoaderObj():
- '''This only exists for backwards compat, do not use.
- '''
- display.deprecated('SharedPluginLoaderObj is deprecated, please directly use ansible.plugins.loader',
- version='2.11', collection_name='ansible.builtin')
- return plugin_loader
-
-
_sentinel = StrategySentinel()
@@ -100,7 +94,14 @@ def results_thread_main(strategy):
result = strategy._final_q.get()
if isinstance(result, StrategySentinel):
break
+ elif isinstance(result, CallbackSend):
+ for arg in result.args:
+ if isinstance(arg, TaskResult):
+ strategy.normalize_task_result(arg)
+ break
+ strategy._tqm.send_callback(result.method_name, *result.args, **result.kwargs)
elif isinstance(result, TaskResult):
+ strategy.normalize_task_result(result)
with strategy._results_lock:
# only handlers have the listen attr, so this must be a handler
# we split up the results into two queues here to make sure
@@ -451,6 +452,31 @@ class StrategyBase:
for target_host in host_list:
_set_host_facts(target_host, always_facts)
+ def normalize_task_result(self, task_result):
+ """Normalize a TaskResult to reference actual Host and Task objects
+ when only given the ``Host.name``, or the ``Task._uuid``
+
+ Only the ``Host.name`` and ``Task._uuid`` are commonly sent back from
+        the ``TaskExecutor`` or ``WorkerProcess`` due to performance concerns.
+
+        Mutates the original object.
+ """
+
+ if isinstance(task_result._host, string_types):
+ # If the value is a string, it is ``Host.name``
+ task_result._host = self._inventory.get_host(to_text(task_result._host))
+
+ if isinstance(task_result._task, string_types):
+ # If the value is a string, it is ``Task._uuid``
+ queue_cache_entry = (task_result._host.name, task_result._task)
+ found_task = self._queued_task_cache.get(queue_cache_entry)['task']
+ original_task = found_task.copy(exclude_parent=True, exclude_tasks=True)
+ original_task._parent = found_task._parent
+ original_task.from_attrs(task_result._task_fields)
+ task_result._task = original_task
+
+ return task_result
+
@debug_closure
def _process_pending_results(self, iterator, one_pass=False, max_passes=None, do_handlers=False):
'''
@@ -461,14 +487,6 @@ class StrategyBase:
ret_results = []
handler_templar = Templar(self._loader)
- def get_original_host(host_name):
- # FIXME: this should not need x2 _inventory
- host_name = to_text(host_name)
- if host_name in self._inventory.hosts:
- return self._inventory.hosts[host_name]
- else:
- return self._inventory.get_host(host_name)
-
def search_handler_blocks_by_name(handler_name, handler_blocks):
# iterate in reversed order since last handler loaded with the same name wins
for handler_block in reversed(handler_blocks):
@@ -517,32 +535,8 @@ class StrategyBase:
finally:
self._results_lock.release()
- # get the original host and task. We then assign them to the TaskResult for use in callbacks/etc.
- original_host = get_original_host(task_result._host)
- queue_cache_entry = (original_host.name, task_result._task)
- found_task = self._queued_task_cache.get(queue_cache_entry)['task']
- original_task = found_task.copy(exclude_parent=True, exclude_tasks=True)
- original_task._parent = found_task._parent
- original_task.from_attrs(task_result._task_fields)
-
- task_result._host = original_host
- task_result._task = original_task
-
- # send callbacks for 'non final' results
- if '_ansible_retry' in task_result._result:
- self._tqm.send_callback('v2_runner_retry', task_result)
- continue
- elif '_ansible_item_result' in task_result._result:
- if task_result.is_failed() or task_result.is_unreachable():
- self._tqm.send_callback('v2_runner_item_on_failed', task_result)
- elif task_result.is_skipped():
- self._tqm.send_callback('v2_runner_item_on_skipped', task_result)
- else:
- if 'diff' in task_result._result:
- if self._diff or getattr(original_task, 'diff', False):
- self._tqm.send_callback('v2_on_file_diff', task_result)
- self._tqm.send_callback('v2_runner_item_on_ok', task_result)
- continue
+ original_host = task_result._host
+ original_task = task_result._task
# all host status messages contain 2 entries: (msg, task_result)
role_ran = False
@@ -579,7 +573,7 @@ class StrategyBase:
self._variable_manager.set_nonpersistent_facts(
original_host.name,
dict(
- ansible_failed_task=original_task.serialize(),
+ ansible_failed_task=wrap_var(original_task.serialize()),
ansible_failed_result=task_result._result,
),
)
@@ -670,7 +664,7 @@ class StrategyBase:
self._add_group(original_host, result_item)
post_process_whens(result_item, original_task, handler_templar)
- if 'ansible_facts' in result_item:
+ if 'ansible_facts' in result_item and original_task.action not in C._ACTION_DEBUG:
# if delegated fact and we are delegating facts, we need to change target host for them
if original_task.delegate_to is not None and original_task.delegate_facts:
host_list = self.get_delegated_hosts(result_item, original_task)
@@ -1141,22 +1135,21 @@ class StrategyBase:
skipped = False
msg = ''
- # The top-level conditions should only compare meta_action
+ skip_reason = '%s conditional evaluated to False' % meta_action
+ self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
+
+ # These don't support "when" conditionals
+ if meta_action in ('noop', 'flush_handlers', 'refresh_inventory', 'reset_connection') and task.when:
+ self._cond_not_supported_warn(meta_action)
+
if meta_action == 'noop':
- # FIXME: issue a callback for the noop here?
- if task.when:
- self._cond_not_supported_warn(meta_action)
msg = "noop"
elif meta_action == 'flush_handlers':
- if task.when:
- self._cond_not_supported_warn(meta_action)
self._flushed_hosts[target_host] = True
self.run_handlers(iterator, play_context)
self._flushed_hosts[target_host] = False
msg = "ran handlers"
elif meta_action == 'refresh_inventory':
- if task.when:
- self._cond_not_supported_warn(meta_action)
self._inventory.refresh_inventory()
self._set_hosts_cache(iterator._play)
msg = "inventory successfully refreshed"
@@ -1168,6 +1161,7 @@ class StrategyBase:
msg = "facts cleared"
else:
skipped = True
+ skip_reason += ', not clearing facts and fact cache for %s' % target_host.name
elif meta_action == 'clear_host_errors':
if _evaluate_conditional(target_host):
for host in self._inventory.get_hosts(iterator._play.hosts):
@@ -1177,12 +1171,16 @@ class StrategyBase:
msg = "cleared host errors"
else:
skipped = True
+ skip_reason += ', not clearing host error state for %s' % target_host.name
elif meta_action == 'end_play':
if _evaluate_conditional(target_host):
for host in self._inventory.get_hosts(iterator._play.hosts):
if host.name not in self._tqm._unreachable_hosts:
iterator._host_states[host.name].run_state = iterator.ITERATING_COMPLETE
msg = "ending play"
+ else:
+ skipped = True
+ skip_reason += ', continuing play'
elif meta_action == 'end_host':
if _evaluate_conditional(target_host):
iterator._host_states[target_host.name].run_state = iterator.ITERATING_COMPLETE
@@ -1190,7 +1188,16 @@ class StrategyBase:
msg = "ending play for %s" % target_host.name
else:
skipped = True
+ skip_reason += ", continuing execution for %s" % target_host.name
+ # TODO: Nix msg here? Left for historical reasons, but skip_reason exists now.
msg = "end_host conditional evaluated to false, continuing execution for %s" % target_host.name
+ elif meta_action == 'role_complete':
+ # Allow users to use this in a play as reported in https://github.com/ansible/ansible/issues/22286?
+ # How would this work with allow_duplicates??
+ if task.implicit:
+ if target_host.name in task._role._had_task_run:
+ task._role._completed[target_host.name] = True
+ msg = 'role_complete for %s' % target_host.name
elif meta_action == 'reset_connection':
all_vars = self._variable_manager.get_vars(play=iterator._play, host=target_host, task=task,
_hosts=self._hosts_cache, _hosts_all=self._hosts_cache_all)
@@ -1214,14 +1221,12 @@ class StrategyBase:
# a certain subset of variables exist.
play_context.update_vars(all_vars)
- if task.when:
- self._cond_not_supported_warn(meta_action)
-
if target_host in self._active_connections:
connection = Connection(self._active_connections[target_host])
del self._active_connections[target_host]
else:
connection = plugin_loader.connection_loader.get(play_context.connection, play_context, os.devnull)
+ connection.set_options(task_keys=task.dump_attrs(), var_options=all_vars)
play_context.set_attributes_from_plugin(connection)
if connection:
@@ -1239,12 +1244,16 @@ class StrategyBase:
result = {'msg': msg}
if skipped:
result['skipped'] = True
+ result['skip_reason'] = skip_reason
else:
result['changed'] = False
display.vv("META: %s" % msg)
- return [TaskResult(target_host, task, result)]
+ res = TaskResult(target_host, task, result)
+ if skipped:
+ self._tqm.send_callback('v2_runner_on_skipped', res)
+ return [res]
def get_hosts_left(self, iterator):
''' returns list of available hosts for this iterator by filtering out unreachables '''
@@ -1331,7 +1340,7 @@ class Debugger(cmd.Cmd):
def do_update_task(self, args):
"""Recreate the task from ``task._ds``, and template with updated ``task_vars``"""
- templar = Templar(None, shared_loader_obj=None, variables=self.scope['task_vars'])
+ templar = Templar(None, variables=self.scope['task_vars'])
task = self.scope['task']
task = task.load_data(task._ds)
task.post_validate(templar)
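
Much of the strategy/__init__.py change above centers on normalize_task_result: workers send back only Host.name and Task._uuid for performance, and the strategy rehydrates real objects from the inventory and the queued-task cache. A simplified standalone sketch of that rehydration; the classes and cache layout below are toy stand-ins, not Ansible's:

    class Host:
        def __init__(self, name):
            self.name = name

    class TaskResult:
        def __init__(self, host, task):
            self._host = host    # Host object, or just Host.name (str) from a worker
            self._task = task    # Task object, or just Task._uuid (str) from a worker

    def normalize_task_result(result, inventory, queued_task_cache):
        # Rehydrate the host first: a bare string means only Host.name came back.
        if isinstance(result._host, str):
            result._host = inventory[result._host]
        # Then the task: a bare string means only Task._uuid came back, so look
        # the real object up in the cache keyed by (host name, task uuid).
        if isinstance(result._task, str):
            result._task = queued_task_cache[(result._host.name, result._task)]['task']
        return result

    inventory = {'web1': Host('web1')}
    queued_task_cache = {('web1', 'uuid-123'): {'task': '<Task: ping>'}}

    res = normalize_task_result(TaskResult('web1', 'uuid-123'), inventory, queued_task_cache)
    print(res._host.name, res._task)   # web1 <Task: ping>
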
diff --git a/lib/ansible/plugins/strategy/debug.py b/lib/ansible/plugins/strategy/debug.py
index 1b23c7df..f808bcfa 100644
--- a/lib/ansible/plugins/strategy/debug.py
+++ b/lib/ansible/plugins/strategy/debug.py
@@ -16,7 +16,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
- strategy: debug
+ name: debug
short_description: Executes tasks in interactive debug session.
description:
- Task execution is 'linear' but controlled by an interactive debug session.
diff --git a/lib/ansible/plugins/strategy/free.py b/lib/ansible/plugins/strategy/free.py
index 77988087..7e040698 100644
--- a/lib/ansible/plugins/strategy/free.py
+++ b/lib/ansible/plugins/strategy/free.py
@@ -19,7 +19,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
- strategy: free
+ name: free
short_description: Executes tasks without waiting for all hosts
description:
- Task execution is as fast as possible per batch as defined by C(serial) (default all).
@@ -92,6 +92,9 @@ class StrategyModule(StrategyBase):
self._set_hosts_cache(iterator._play)
+ if iterator._play.max_fail_percentage is not None:
+ display.warning("Using max_fail_percentage with the free strategy is not supported, as tasks are executed independently on each host")
+
work_to_do = True
while work_to_do and not self._tqm._terminated:
@@ -259,7 +262,7 @@ class StrategyModule(StrategyBase):
continue
for new_block in new_blocks:
- task_vars = self._variable_manager.get_vars(play=iterator._play, task=new_block._parent,
+ task_vars = self._variable_manager.get_vars(play=iterator._play, task=new_block.get_first_parent_include(),
_hosts=self._hosts_cache,
_hosts_all=self._hosts_cache_all)
final_block = new_block.filter_tagged_tasks(task_vars)
diff --git a/lib/ansible/plugins/strategy/host_pinned.py b/lib/ansible/plugins/strategy/host_pinned.py
index ba293d36..70f22eb8 100644
--- a/lib/ansible/plugins/strategy/host_pinned.py
+++ b/lib/ansible/plugins/strategy/host_pinned.py
@@ -19,7 +19,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
- strategy: host_pinned
+ name: host_pinned
short_description: Executes tasks on each host without interruption
description:
- Task execution is as fast as possible per host in batch as defined by C(serial) (default all).
diff --git a/lib/ansible/plugins/strategy/linear.py b/lib/ansible/plugins/strategy/linear.py
index 1810757d..8b434983 100644
--- a/lib/ansible/plugins/strategy/linear.py
+++ b/lib/ansible/plugins/strategy/linear.py
@@ -19,7 +19,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
- strategy: linear
+ name: linear
short_description: Executes tasks in a linear fashion
description:
- Task execution is in lockstep per host batch as defined by C(serial) (default all).
@@ -75,6 +75,7 @@ class StrategyModule(StrategyBase):
self.noop_task = Task()
self.noop_task.action = 'meta'
self.noop_task.args['_raw_params'] = 'noop'
+ self.noop_task.implicit = True
self.noop_task.set_loader(iterator._play._loader)
return self._create_noop_block_from(original_block, parent)
@@ -89,6 +90,7 @@ class StrategyModule(StrategyBase):
noop_task = Task()
noop_task.action = 'meta'
noop_task.args['_raw_params'] = 'noop'
+ noop_task.implicit = True
noop_task.set_loader(iterator._play._loader)
host_tasks = {}
@@ -261,20 +263,20 @@ class StrategyModule(StrategyBase):
# sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we
# will only send this task to the first host in the list.
- task.action = templar.template(task.action)
+ task_action = templar.template(task.action)
try:
- action = action_loader.get(task.action, class_only=True, collection_list=task.collections)
+ action = action_loader.get(task_action, class_only=True, collection_list=task.collections)
except KeyError:
# we don't care here, because the action may simply not have a
# corresponding action plugin
action = None
- if task.action in C._ACTION_META:
+ if task_action in C._ACTION_META:
# for the linear strategy, we run meta tasks just once and for
# all hosts currently being iterated over rather than one host
results.extend(self._execute_meta(task, play_context, iterator, host))
- if task.args.get('_raw_params', None) not in ('noop', 'reset_connection', 'end_host'):
+ if task.args.get('_raw_params', None) not in ('noop', 'reset_connection', 'end_host', 'role_complete'):
run_once = True
if (task.any_errors_fatal or run_once) and not task.ignore_errors:
any_errors_fatal = True
@@ -365,7 +367,7 @@ class StrategyModule(StrategyBase):
for new_block in new_blocks:
task_vars = self._variable_manager.get_vars(
play=iterator._play,
- task=new_block._parent,
+ task=new_block.get_first_parent_include(),
_hosts=self._hosts_cache,
_hosts_all=self._hosts_cache_all,
)
diff --git a/lib/ansible/plugins/test/core.py b/lib/ansible/plugins/test/core.py
index 40733a14..42aec36a 100644
--- a/lib/ansible/plugins/test/core.py
+++ b/lib/ansible/plugins/test/core.py
@@ -24,10 +24,11 @@ import operator as py_operator
from distutils.version import LooseVersion, StrictVersion
from ansible import errors
-from ansible.module_utils._text import to_text
+from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.common._collections_compat import MutableMapping, MutableSequence
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.utils.display import Display
+from ansible.utils.version import SemanticVersion
display = Display()
@@ -146,7 +147,7 @@ def search(value, pattern='', ignorecase=False, multiline=False):
return regex(value, pattern, ignorecase, multiline, 'search')
-def version_compare(value, version, operator='eq', strict=False):
+def version_compare(value, version, operator='eq', strict=None, version_type=None):
''' Perform a version comparison on a value '''
op_map = {
'==': 'eq', '=': 'eq', 'eq': 'eq',
@@ -157,21 +158,45 @@ def version_compare(value, version, operator='eq', strict=False):
'!=': 'ne', '<>': 'ne', 'ne': 'ne'
}
+ type_map = {
+ 'loose': LooseVersion,
+ 'strict': StrictVersion,
+ 'semver': SemanticVersion,
+ 'semantic': SemanticVersion,
+ }
+
+ if strict is not None and version_type is not None:
+ raise errors.AnsibleFilterError("Cannot specify both 'strict' and 'version_type'")
+
+ if not value:
+ raise errors.AnsibleFilterError("Input version value cannot be empty")
+
+ if not version:
+ raise errors.AnsibleFilterError("Version parameter to compare against cannot be empty")
+
+ Version = LooseVersion
if strict:
Version = StrictVersion
- else:
- Version = LooseVersion
+ elif version_type:
+ try:
+ Version = type_map[version_type]
+ except KeyError:
+ raise errors.AnsibleFilterError(
+ "Invalid version type (%s). Must be one of %s" % (version_type, ', '.join(map(repr, type_map)))
+ )
if operator in op_map:
operator = op_map[operator]
else:
- raise errors.AnsibleFilterError('Invalid operator type')
+ raise errors.AnsibleFilterError(
+ 'Invalid operator type (%s). Must be one of %s' % (operator, ', '.join(map(repr, op_map)))
+ )
try:
method = getattr(py_operator, operator)
- return method(Version(str(value)), Version(str(version)))
+ return method(Version(to_text(value)), Version(to_text(version)))
except Exception as e:
- raise errors.AnsibleFilterError('Version comparison: %s' % e)
+ raise errors.AnsibleFilterError('Version comparison failed: %s' % to_native(e))
def truthy(value, convert_bool=False):
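
The version_compare changes above add a version_type selector alongside the older strict flag. A small illustration of why the parser choice matters, using the same distutils classes the filter imports; SemanticVersion is Ansible's own class and is only mentioned, not exercised, here:

    import operator
    from distutils.version import LooseVersion, StrictVersion

    def compare(value, version, op='ge', strict=False):
        # 'strict' picks StrictVersion, which only accepts X.Y[.Z][a|bN] version strings.
        Version = StrictVersion if strict else LooseVersion
        return getattr(operator, op)(Version(str(value)), Version(str(version)))

    print(compare('2.10.1.post3', '2.10', 'ge'))            # True -- loose parsing accepts the suffix
    try:
        compare('2.10.1.post3', '2.10', 'ge', strict=True)
    except ValueError as exc:
        print('strict parsing rejects it:', exc)
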
diff --git a/lib/ansible/plugins/vars/host_group_vars.py b/lib/ansible/plugins/vars/host_group_vars.py
index 377a77de..7bdfe332 100644
--- a/lib/ansible/plugins/vars/host_group_vars.py
+++ b/lib/ansible/plugins/vars/host_group_vars.py
@@ -19,7 +19,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
- vars: host_group_vars
+ name: host_group_vars
version_added: "2.4"
short_description: In charge of loading group_vars and host_vars
requirements:
diff --git a/lib/ansible/release.py b/lib/ansible/release.py
index b5ae9fcc..7f45e40b 100644
--- a/lib/ansible/release.py
+++ b/lib/ansible/release.py
@@ -19,6 +19,6 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-__version__ = '2.10.7'
+__version__ = '2.11.5'
__author__ = 'Ansible, Inc.'
-__codename__ = 'When the Levee Breaks'
+__codename__ = 'Hey Hey, What Can I Do'
diff --git a/lib/ansible/template/__init__.py b/lib/ansible/template/__init__.py
index eb1276c2..6dc7d8e3 100644
--- a/lib/ansible/template/__init__.py
+++ b/lib/ansible/template/__init__.py
@@ -56,6 +56,7 @@ from ansible.template.vars import AnsibleJ2Vars
from ansible.utils.collection_loader import AnsibleCollectionRef
from ansible.utils.display import Display
from ansible.utils.collection_loader._collection_finder import _get_collection_metadata
+from ansible.utils.listify import listify_lookup_plugin_terms
from ansible.utils.unsafe_proxy import wrap_var
display = Display()
@@ -72,13 +73,16 @@ NON_TEMPLATED_TYPES = (bool, Number)
JINJA2_OVERRIDE = '#jinja2:'
from jinja2 import __version__ as j2_version
+from jinja2 import Environment
+from jinja2.utils import concat as j2_concat
+
USE_JINJA2_NATIVE = False
if C.DEFAULT_JINJA2_NATIVE:
try:
- from jinja2.nativetypes import NativeEnvironment as Environment
- from ansible.template.native_helpers import ansible_native_concat as j2_concat
- from ansible.template.native_helpers import NativeJinjaText
+ from jinja2.nativetypes import NativeEnvironment
+ from ansible.template.native_helpers import ansible_native_concat
+ from ansible.utils.native_jinja import NativeJinjaText
USE_JINJA2_NATIVE = True
except ImportError:
from jinja2 import Environment
@@ -87,9 +91,6 @@ if C.DEFAULT_JINJA2_NATIVE:
'jinja2_native requires Jinja 2.10 and above. '
'Version detected: %s. Falling back to default.' % j2_version
)
-else:
- from jinja2 import Environment
- from jinja2.utils import concat as j2_concat
JINJA2_BEGIN_TOKENS = frozenset(('variable_begin', 'block_begin', 'comment_begin', 'raw_begin'))
@@ -99,8 +100,13 @@ JINJA2_END_TOKENS = frozenset(('variable_end', 'block_end', 'comment_end', 'raw_
RANGE_TYPE = type(range(0))
-def generate_ansible_template_vars(path, dest_path=None):
- b_path = to_bytes(path)
+def generate_ansible_template_vars(path, fullpath=None, dest_path=None):
+
+ if fullpath is None:
+ b_path = to_bytes(path)
+ else:
+ b_path = to_bytes(fullpath)
+
try:
template_uid = pwd.getpwuid(os.stat(b_path).st_uid).pw_name
except (KeyError, TypeError):
@@ -111,11 +117,15 @@ def generate_ansible_template_vars(path, dest_path=None):
'template_path': path,
'template_mtime': datetime.datetime.fromtimestamp(os.path.getmtime(b_path)),
'template_uid': to_text(template_uid),
- 'template_fullpath': os.path.abspath(path),
'template_run_date': datetime.datetime.now(),
'template_destpath': to_native(dest_path) if dest_path else None,
}
+ if fullpath is None:
+ temp_vars['template_fullpath'] = os.path.abspath(path)
+ else:
+ temp_vars['template_fullpath'] = fullpath
+
managed_default = C.DEFAULT_MANAGED_STR
managed_str = managed_default.format(
host=temp_vars['template_host'],
@@ -257,7 +267,6 @@ def _unroll_iterator(func):
return list(ret)
return ret
- wrapper.__UNROLLED__ = True
return _update_wrapper(wrapper, func)
@@ -399,10 +408,11 @@ class AnsibleContext(Context):
class JinjaPluginIntercept(MutableMapping):
- def __init__(self, delegatee, pluginloader, *args, **kwargs):
+ def __init__(self, delegatee, pluginloader, jinja2_native, *args, **kwargs):
super(JinjaPluginIntercept, self).__init__(*args, **kwargs)
self._delegatee = delegatee
self._pluginloader = pluginloader
+ self._jinja2_native = jinja2_native
if self._pluginloader.class_name == 'FilterModule':
self._method_map_name = 'filters'
@@ -413,9 +423,30 @@ class JinjaPluginIntercept(MutableMapping):
self._collection_jinja_func_cache = {}
+ self._ansible_plugins_loaded = False
+
+ def _load_ansible_plugins(self):
+ if self._ansible_plugins_loaded:
+ return
+
+ for plugin in self._pluginloader.all():
+ method_map = getattr(plugin, self._method_map_name)
+ self._delegatee.update(method_map())
+
+ if self._pluginloader.class_name == 'FilterModule':
+ for plugin_name, plugin in self._delegatee.items():
+ if self._jinja2_native and plugin_name in C.STRING_TYPE_FILTERS:
+ self._delegatee[plugin_name] = _wrap_native_text(plugin)
+ else:
+ self._delegatee[plugin_name] = _unroll_iterator(plugin)
+
+ self._ansible_plugins_loaded = True
+
# FUTURE: we can cache FQ filter/test calls for the entire duration of a run, since a given collection's impl's
# aren't supposed to change during a run
def __getitem__(self, key):
+ self._load_ansible_plugins()
+
try:
if not isinstance(key, string_types):
raise ValueError('key must be a string')
@@ -510,11 +541,14 @@ class JinjaPluginIntercept(MutableMapping):
for func_name, func in iteritems(method_map()):
fq_name = '.'.join((parent_prefix, func_name))
# FIXME: detect/warn on intra-collection function name collisions
- if USE_JINJA2_NATIVE and fq_name.startswith(('ansible.builtin.', 'ansible.legacy.')) and \
- func_name in C.STRING_TYPE_FILTERS:
- self._collection_jinja_func_cache[fq_name] = _wrap_native_text(func)
+ if self._pluginloader.class_name == 'FilterModule':
+ if self._jinja2_native and fq_name.startswith(('ansible.builtin.', 'ansible.legacy.')) and \
+ func_name in C.STRING_TYPE_FILTERS:
+ self._collection_jinja_func_cache[fq_name] = _wrap_native_text(func)
+ else:
+ self._collection_jinja_func_cache[fq_name] = _unroll_iterator(func)
else:
- self._collection_jinja_func_cache[fq_name] = _unroll_iterator(func)
+ self._collection_jinja_func_cache[fq_name] = func
function_impl = self._collection_jinja_func_cache[key]
return function_impl
@@ -546,6 +580,9 @@ class AnsibleEnvironment(Environment):
'''
Our custom environment, which simply allows us to override the class-level
values for the Template and Context classes used by jinja2 internally.
+
+ NOTE: Any changes to this class must be reflected in
+ :class:`AnsibleNativeEnvironment` as well.
'''
context_class = AnsibleContext
template_class = AnsibleJ2Template
@@ -553,8 +590,27 @@ class AnsibleEnvironment(Environment):
def __init__(self, *args, **kwargs):
super(AnsibleEnvironment, self).__init__(*args, **kwargs)
- self.filters = JinjaPluginIntercept(self.filters, filter_loader)
- self.tests = JinjaPluginIntercept(self.tests, test_loader)
+ self.filters = JinjaPluginIntercept(self.filters, filter_loader, jinja2_native=False)
+ self.tests = JinjaPluginIntercept(self.tests, test_loader, jinja2_native=False)
+
+
+if USE_JINJA2_NATIVE:
+ class AnsibleNativeEnvironment(NativeEnvironment):
+ '''
+ Our custom environment, which simply allows us to override the class-level
+ values for the Template and Context classes used by jinja2 internally.
+
+ NOTE: Any changes to this class must be reflected in
+ :class:`AnsibleEnvironment` as well.
+ '''
+ context_class = AnsibleContext
+ template_class = AnsibleJ2Template
+
+ def __init__(self, *args, **kwargs):
+ super(AnsibleNativeEnvironment, self).__init__(*args, **kwargs)
+
+ self.filters = JinjaPluginIntercept(self.filters, filter_loader, jinja2_native=True)
+ self.tests = JinjaPluginIntercept(self.tests, test_loader, jinja2_native=True)
class Templar:
@@ -563,27 +619,14 @@ class Templar:
'''
def __init__(self, loader, shared_loader_obj=None, variables=None):
- variables = {} if variables is None else variables
-
+ # NOTE shared_loader_obj is deprecated, ansible.plugins.loader is used
+ # directly. Keeping the arg for now in case 3rd party code "uses" it.
self._loader = loader
self._filters = None
self._tests = None
- self._available_variables = variables
+ self._available_variables = {} if variables is None else variables
self._cached_result = {}
-
- if loader:
- self._basedir = loader.get_basedir()
- else:
- self._basedir = './'
-
- if shared_loader_obj:
- self._filter_loader = getattr(shared_loader_obj, 'filter_loader')
- self._test_loader = getattr(shared_loader_obj, 'test_loader')
- self._lookup_loader = getattr(shared_loader_obj, 'lookup_loader')
- else:
- self._filter_loader = filter_loader
- self._test_loader = test_loader
- self._lookup_loader = lookup_loader
+ self._basedir = loader.get_basedir() if loader else './'
# flags to determine whether certain failures during templating
# should result in fatal errors being raised
@@ -591,7 +634,9 @@ class Templar:
self._fail_on_filter_errors = True
self._fail_on_undefined_errors = C.DEFAULT_UNDEFINED_VAR_BEHAVIOR
- self.environment = AnsibleEnvironment(
+ environment_class = AnsibleNativeEnvironment if USE_JINJA2_NATIVE else AnsibleEnvironment
+
+ self.environment = environment_class(
trim_blocks=True,
undefined=AnsibleUndefined,
extensions=self._get_extensions(),
@@ -611,56 +656,49 @@ class Templar:
# the current rendering context under which the templar class is working
self.cur_context = None
+ # FIXME these regular expressions should be re-compiled each time variable_start_string and variable_end_string are changed
self.SINGLE_VAR = re.compile(r"^%s\s*(\w*)\s*%s$" % (self.environment.variable_start_string, self.environment.variable_end_string))
-
- self._clean_regex = re.compile(r'(?:%s|%s|%s|%s)' % (
- self.environment.variable_start_string,
- self.environment.block_start_string,
- self.environment.block_end_string,
- self.environment.variable_end_string
- ))
self._no_type_regex = re.compile(r'.*?\|\s*(?:%s)(?:\([^\|]*\))?\s*\)?\s*(?:%s)' %
('|'.join(C.STRING_TYPE_FILTERS), self.environment.variable_end_string))
- def _get_filters(self):
- '''
- Returns filter plugins, after loading and caching them if need be
- '''
-
- if self._filters is not None:
- return self._filters.copy()
+ @property
+ def jinja2_native(self):
+ return not isinstance(self.environment, AnsibleEnvironment)
- self._filters = dict()
+ def copy_with_new_env(self, environment_class=AnsibleEnvironment, **kwargs):
+ r"""Creates a new copy of Templar with a new environment. The new environment is based on
+        the given environment class and kwargs.
- for fp in self._filter_loader.all():
- self._filters.update(fp.filters())
+ :kwarg environment_class: Environment class used for creating a new environment.
+ :kwarg \*\*kwargs: Optional arguments for the new environment that override existing
+ environment attributes.
- if USE_JINJA2_NATIVE:
- for string_filter in C.STRING_TYPE_FILTERS:
- try:
- orig_filter = self._filters[string_filter]
- except KeyError:
- try:
- orig_filter = self.environment.filters[string_filter]
- except KeyError:
- continue
- self._filters[string_filter] = _wrap_native_text(orig_filter)
+ :returns: Copy of Templar with updated environment.
+ """
+ # We need to use __new__ to skip __init__, mainly not to create a new
+ # environment there only to override it below
+ new_env = object.__new__(environment_class)
+ new_env.__dict__.update(self.environment.__dict__)
- return self._filters.copy()
+ new_templar = object.__new__(Templar)
+ new_templar.__dict__.update(self.__dict__)
+ new_templar.environment = new_env
- def _get_tests(self):
- '''
- Returns tests plugins, after loading and caching them if need be
- '''
-
- if self._tests is not None:
- return self._tests.copy()
+ mapping = {
+ 'available_variables': new_templar,
+ 'searchpath': new_env.loader,
+ }
- self._tests = dict()
- for fp in self._test_loader.all():
- self._tests.update(fp.tests())
+ for key, value in kwargs.items():
+ obj = mapping.get(key, new_env)
+ try:
+ if value is not None:
+ setattr(obj, key, value)
+ except AttributeError:
+ # Ignore invalid attrs, lstrip_blocks was added in jinja2==2.7
+ pass
- return self._tests.copy()
+ return new_templar
def _get_extensions(self):
'''
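
copy_with_new_env above clones both the Templar and its Jinja2 environment by allocating with object.__new__ and copying __dict__, deliberately skipping __init__ so a throwaway environment is never built. The same trick in isolation, on a toy class rather than Ansible's:

    class Widget:
        def __init__(self, size):
            print('expensive setup running')
            self.size = size

    original = Widget(3)

    # Allocate without calling __init__, then copy state across (a shallow copy).
    clone = object.__new__(Widget)
    clone.__dict__.update(original.__dict__)
    clone.size = 5

    print(original.size, clone.size)   # 3 5 -- the expensive setup ran only once
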
@@ -740,7 +778,7 @@ class Templar:
set to True, the given data will be wrapped as a jinja2 variable ('{{foo}}')
before being sent through the template engine.
'''
- static_vars = [''] if static_vars is None else static_vars
+ static_vars = [] if static_vars is None else static_vars
# Don't template unsafe variables, just return them.
if hasattr(variable, '__UNSAFE__'):
@@ -795,7 +833,7 @@ class Templar:
disable_lookups=disable_lookups,
)
- if not USE_JINJA2_NATIVE:
+ if not self.jinja2_native:
unsafe = hasattr(result, '__UNSAFE__')
if convert_data and not self._no_type_regex.match(variable):
# if this looks like a dictionary or list, convert it to such using the safe_eval method
@@ -918,7 +956,7 @@ class Templar:
# unncessary
return list(thing)
- if USE_JINJA2_NATIVE:
+ if self.jinja2_native:
return thing
return thing if thing is not None else ''
@@ -944,60 +982,63 @@ class Templar:
return self._lookup(name, *args, **kwargs)
def _lookup(self, name, *args, **kwargs):
- instance = self._lookup_loader.get(name, loader=self._loader, templar=self)
+ instance = lookup_loader.get(name, loader=self._loader, templar=self)
- if instance is not None:
- wantlist = kwargs.pop('wantlist', False)
- allow_unsafe = kwargs.pop('allow_unsafe', C.DEFAULT_ALLOW_UNSAFE_LOOKUPS)
- errors = kwargs.pop('errors', 'strict')
+ if instance is None:
+ raise AnsibleError("lookup plugin (%s) not found" % name)
- from ansible.utils.listify import listify_lookup_plugin_terms
- loop_terms = listify_lookup_plugin_terms(terms=args, templar=self, loader=self._loader, fail_on_undefined=True, convert_bare=False)
- # safely catch run failures per #5059
- try:
- ran = instance.run(loop_terms, variables=self._available_variables, **kwargs)
- except (AnsibleUndefinedVariable, UndefinedError) as e:
- raise AnsibleUndefinedVariable(e)
- except Exception as e:
- if self._fail_on_lookup_errors:
- msg = u"An unhandled exception occurred while running the lookup plugin '%s'. Error was a %s, original message: %s" % \
- (name, type(e), to_text(e))
- if errors == 'warn':
- display.warning(msg)
- elif errors == 'ignore':
- display.display(msg, log_only=True)
- else:
- raise AnsibleError(to_native(msg))
- ran = [] if wantlist else None
+ wantlist = kwargs.pop('wantlist', False)
+ allow_unsafe = kwargs.pop('allow_unsafe', C.DEFAULT_ALLOW_UNSAFE_LOOKUPS)
+ errors = kwargs.pop('errors', 'strict')
- if ran and not allow_unsafe:
- if wantlist:
- ran = wrap_var(ran)
+ loop_terms = listify_lookup_plugin_terms(terms=args, templar=self, loader=self._loader, fail_on_undefined=True, convert_bare=False)
+ # safely catch run failures per #5059
+ try:
+ ran = instance.run(loop_terms, variables=self._available_variables, **kwargs)
+ except (AnsibleUndefinedVariable, UndefinedError) as e:
+ raise AnsibleUndefinedVariable(e)
+ except Exception as e:
+ if self._fail_on_lookup_errors:
+ msg = u"An unhandled exception occurred while running the lookup plugin '%s'. Error was a %s, original message: %s" % \
+ (name, type(e), to_text(e))
+ if errors == 'warn':
+ display.warning(msg)
+ elif errors == 'ignore':
+ display.display(msg, log_only=True)
else:
- try:
- ran = wrap_var(",".join(ran))
- except TypeError:
- # Lookup Plugins should always return lists. Throw an error if that's not
- # the case:
- if not isinstance(ran, Sequence):
- raise AnsibleError("The lookup plugin '%s' did not return a list."
- % name)
-
- # The TypeError we can recover from is when the value *inside* of the list
- # is not a string
- if len(ran) == 1:
- ran = wrap_var(ran[0])
- else:
- ran = wrap_var(ran)
+ raise AnsibleError(to_native(msg))
+ return [] if wantlist else None
- if self.cur_context:
- self.cur_context.unsafe = True
- return ran
- else:
- raise AnsibleError("lookup plugin (%s) not found" % name)
+ if ran and allow_unsafe is False:
+ if self.cur_context:
+ self.cur_context.unsafe = True
+
+ if wantlist:
+ return wrap_var(ran)
+
+ try:
+ if self.jinja2_native and isinstance(ran[0], NativeJinjaText):
+ ran = wrap_var(NativeJinjaText(",".join(ran)))
+ else:
+ ran = wrap_var(",".join(ran))
+ except TypeError:
+ # Lookup Plugins should always return lists. Throw an error if that's not
+ # the case:
+ if not isinstance(ran, Sequence):
+ raise AnsibleError("The lookup plugin '%s' did not return a list."
+ % name)
+
+ # The TypeError we can recover from is when the value *inside* of the list
+ # is not a string
+ if len(ran) == 1:
+ ran = wrap_var(ran[0])
+ else:
+ ran = wrap_var(ran)
+
+ return ran
def do_template(self, data, preserve_trailing_newlines=True, escape_backslashes=True, fail_on_undefined=None, overrides=None, disable_lookups=False):
- if USE_JINJA2_NATIVE and not isinstance(data, string_types):
+ if self.jinja2_native and not isinstance(data, string_types):
return data
# For preserving the number of input newlines in the output (used
@@ -1007,15 +1048,21 @@ class Templar:
if fail_on_undefined is None:
fail_on_undefined = self._fail_on_undefined_errors
+ has_template_overrides = data.startswith(JINJA2_OVERRIDE)
+
try:
- # allows template header overrides to change jinja2 options.
- if overrides is None:
+ # NOTE Creating an overlay that lives only inside do_template means that overrides are not applied
+ # when templating nested variables in AnsibleJ2Vars where Templar.environment is used, not the overlay.
+ # This is historic behavior that is kept for backwards compatibility.
+ if overrides:
+ myenv = self.environment.overlay(overrides)
+ elif has_template_overrides:
myenv = self.environment.overlay()
else:
- myenv = self.environment.overlay(overrides)
+ myenv = self.environment
# Get jinja env overrides from template
- if hasattr(data, 'startswith') and data.startswith(JINJA2_OVERRIDE):
+ if has_template_overrides:
eol = data.find('\n')
line = data[len(JINJA2_OVERRIDE):eol]
data = data[eol + 1:]
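As an aside, a minimal sketch of the header-override path handled above; the template text and variable names are illustrative only and not taken from the patch:

    from ansible.parsing.dataloader import DataLoader
    from ansible.template import Templar

    templar = Templar(loader=DataLoader(), variables={'items': ['a', 'b']})
    # the leading '#jinja2:' line is stripped off and each key:value pair is
    # applied to the overlay environment for this render only
    data = u"#jinja2: trim_blocks:False\n{% for i in items %}{{ i }} {% endfor %}"
    rendered = templar.do_template(data)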
@@ -1024,13 +1071,6 @@ class Templar:
key = key.strip()
setattr(myenv, key, ast.literal_eval(val.strip()))
- # Adds Ansible custom filters and tests
- myenv.filters.update(self._get_filters())
- for k in myenv.filters:
- if not getattr(myenv.filters[k], '__UNROLLED__', False):
- myenv.filters[k] = _unroll_iterator(myenv.filters[k])
- myenv.tests.update(self._get_tests())
-
if escape_backslashes:
# Allow users to specify backslashes in playbooks as "\\" instead of as "\\\\".
data = _escape_backslashes(data, myenv)
@@ -1054,8 +1094,12 @@ class Templar:
rf = t.root_render_func(new_context)
try:
- res = j2_concat(rf)
- if getattr(new_context, 'unsafe', False):
+ if self.jinja2_native:
+ res = ansible_native_concat(rf)
+ else:
+ res = j2_concat(rf)
+ unsafe = getattr(new_context, 'unsafe', False)
+ if unsafe:
res = wrap_var(res)
except TypeError as te:
if 'AnsibleUndefined' in to_native(te):
@@ -1066,7 +1110,7 @@ class Templar:
display.debug("failing because of a type error, template data is: %s" % to_text(data))
raise AnsibleError("Unexpected templating type error occurred on (%s): %s" % (to_native(data), to_native(te)))
- if USE_JINJA2_NATIVE and not isinstance(res, string_types):
+ if self.jinja2_native and not isinstance(res, string_types):
return res
if preserve_trailing_newlines:
@@ -1085,6 +1129,8 @@ class Templar:
res_newlines = _count_newlines_from_end(res)
if data_newlines > res_newlines:
res += self.environment.newline_sequence * (data_newlines - res_newlines)
+ if unsafe:
+ res = wrap_var(res)
return res
except (UndefinedError, AnsibleUndefinedVariable) as e:
if fail_on_undefined:
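For orientation, a small usage sketch of the reworked Templar; it relies only on the public constructor and template() call seen elsewhere in this patch, and the variable data is made up:

    from ansible.parsing.dataloader import DataLoader
    from ansible.template import Templar

    templar = Templar(loader=DataLoader(), variables={'ports': [80, 443]})
    # whether ansible_native_concat or j2_concat is used now follows the
    # instance's jinja2_native attribute (driven by the jinja2 native config
    # setting) instead of the old module-level USE_JINJA2_NATIVE constant
    templar.template("{{ ports | length }}")
    # -> 2 as an int under native Jinja2, otherwise the string '2'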
diff --git a/lib/ansible/template/native_helpers.py b/lib/ansible/template/native_helpers.py
index 81bef436..4eb63061 100644
--- a/lib/ansible/template/native_helpers.py
+++ b/lib/ansible/template/native_helpers.py
@@ -17,10 +17,7 @@ from ansible.module_utils.common.collections import is_sequence, Mapping
from ansible.module_utils.common.text.converters import container_to_text
from ansible.module_utils.six import PY2, text_type
from ansible.parsing.yaml.objects import AnsibleVaultEncryptedUnicode
-
-
-class NativeJinjaText(text_type):
- pass
+from ansible.utils.native_jinja import NativeJinjaText
def _fail_on_undefined(data):
diff --git a/lib/ansible/template/safe_eval.py b/lib/ansible/template/safe_eval.py
index 43ce250c..06bc7687 100644
--- a/lib/ansible/template/safe_eval.py
+++ b/lib/ansible/template/safe_eval.py
@@ -112,7 +112,7 @@ def safe_eval(expr, locals=None, include_exceptions=False):
for test in test_loader.all():
test_list.extend(test.tests().keys())
- CALL_WHITELIST = C.DEFAULT_CALLABLE_WHITELIST + filter_list + test_list
+ CALL_ENABLED = C.CALLABLE_ACCEPT_LIST + filter_list + test_list
class CleansingNodeVisitor(ast.NodeVisitor):
def generic_visit(self, node, inside_call=False):
@@ -124,7 +124,7 @@ def safe_eval(expr, locals=None, include_exceptions=False):
# Disallow calls to builtin functions that we have not vetted
# as safe. Other functions are excluded by setting locals in
# the call to eval() later on
- if hasattr(builtins, node.id) and node.id not in CALL_WHITELIST:
+ if hasattr(builtins, node.id) and node.id not in CALL_ENABLED:
raise Exception("invalid function: %s" % node.id)
# iterate over all child nodes
for child_node in ast.iter_child_nodes(node):
@@ -140,7 +140,7 @@ def safe_eval(expr, locals=None, include_exceptions=False):
try:
parsed_tree = ast.parse(expr, mode='eval')
cnv.visit(parsed_tree)
- compiled = compile(parsed_tree, to_native(expr), 'eval')
+ compiled = compile(parsed_tree, '<expr %s>' % to_native(expr), 'eval')
# Note: passing our own globals and locals here constrains what
# callables (and other identifiers) are recognized. this is in
# addition to the filtering of builtins done in CleansingNodeVisitor
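A rough sketch of what the renamed CALL_ENABLED list means in practice; the fallback return value for rejected expressions reflects my reading of safe_eval's error handling, which this hunk does not change:

    from ansible.template.safe_eval import safe_eval

    safe_eval("{'a': 1, 'b': [2, 3]}")          # -> {'a': 1, 'b': [2, 3]}
    # __import__ is a builtin that is not in CALL_ENABLED, so
    # CleansingNodeVisitor rejects it and the original string comes back
    safe_eval("__import__('os').system('id')")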
diff --git a/lib/ansible/template/template.py b/lib/ansible/template/template.py
index 5a555883..3e7738bf 100644
--- a/lib/ansible/template/template.py
+++ b/lib/ansible/template/template.py
@@ -33,11 +33,13 @@ class AnsibleJ2Template(jinja2.environment.Template):
'''
def new_context(self, vars=None, shared=False, locals=None):
- if vars is not None:
- if isinstance(vars, dict):
- vars = vars.copy()
- if locals is not None:
- vars.update(locals)
- else:
- vars = vars.add_locals(locals)
+ if vars is None:
+ vars = dict(self.globals or ())
+
+ if isinstance(vars, dict):
+ vars = vars.copy()
+ if locals is not None:
+ vars.update(locals)
+ else:
+ vars = vars.add_locals(locals)
return self.environment.context_class(self.environment, vars, self.name, self.blocks)
diff --git a/lib/ansible/utils/collection_loader/_collection_config.py b/lib/ansible/utils/collection_loader/_collection_config.py
index e717cde9..096797f0 100644
--- a/lib/ansible/utils/collection_loader/_collection_config.py
+++ b/lib/ansible/utils/collection_loader/_collection_config.py
@@ -67,8 +67,6 @@ class _AnsibleCollectionConfig(type):
@default_collection.setter
def default_collection(cls, value):
- if cls._default_collection:
- raise ValueError('default collection {0} has already been configured'.format(value))
cls._default_collection = value
diff --git a/lib/ansible/utils/collection_loader/_collection_finder.py b/lib/ansible/utils/collection_loader/_collection_finder.py
index 99d5ddc9..b3984265 100644
--- a/lib/ansible/utils/collection_loader/_collection_finder.py
+++ b/lib/ansible/utils/collection_loader/_collection_finder.py
@@ -9,6 +9,8 @@ import os.path
import pkgutil
import re
import sys
+from keyword import iskeyword
+from tokenize import Name as _VALID_IDENTIFIER_REGEX
# DO NOT add new non-stdlib import deps here, this loader is used by external tools (eg ansible-test import sanity)
@@ -40,6 +42,29 @@ except ImportError:
_meta_yml_to_dict = None
+if not hasattr(__builtins__, 'ModuleNotFoundError'):
+ # this was introduced in Python 3.6
+ ModuleNotFoundError = ImportError
+
+
+_VALID_IDENTIFIER_STRING_REGEX = re.compile(
+ ''.join((_VALID_IDENTIFIER_REGEX, r'\Z')),
+)
+
+
+try: # NOTE: py3/py2 compat
+ # py2 mypy can't deal with try/excepts
+ is_python_identifier = str.isidentifier # type: ignore[attr-defined]
+except AttributeError: # Python 2
+ def is_python_identifier(tested_str): # type: (str) -> bool
+ """Determine whether the given string is a Python identifier."""
+ # Ref: https://stackoverflow.com/a/55802320/595220
+ return bool(re.match(_VALID_IDENTIFIER_STRING_REGEX, tested_str))
+
+
+PB_EXTENSIONS = ('.yml', '.yaml')
+
+
class _AnsibleCollectionFinder:
def __init__(self, paths=None, scan_sys_paths=True):
# TODO: accept metadata loader override
@@ -53,19 +78,22 @@ class _AnsibleCollectionFinder:
# expand any placeholders in configured paths
paths = [os.path.expanduser(to_native(p, errors='surrogate_or_strict')) for p in paths]
+ # add syspaths if needed
if scan_sys_paths:
- # append all sys.path entries with an ansible_collections package
- for path in sys.path:
- if (
- path not in paths and
- os.path.isdir(to_bytes(
- os.path.join(path, 'ansible_collections'),
- errors='surrogate_or_strict',
- ))
- ):
- paths.append(path)
-
- self._n_configured_paths = paths
+ paths.extend(sys.path)
+
+ good_paths = []
+ # validate and deduplicate the configured paths
+ for p in paths:
+
+ # ensure we always shave off a trailing 'ansible_collections' component
+ if os.path.basename(p) == 'ansible_collections':
+ p = os.path.dirname(p)
+
+ if p not in good_paths and os.path.isdir(to_bytes(os.path.join(p, 'ansible_collections'), errors='surrogate_or_strict')):
+ good_paths.append(p)
+
+ self._n_configured_paths = good_paths
self._n_cached_collection_paths = None
self._n_cached_collection_qualified_paths = None
@@ -103,8 +131,14 @@ class _AnsibleCollectionFinder:
path = to_native(path)
interesting_paths = self._n_cached_collection_qualified_paths
if not interesting_paths:
- interesting_paths = [os.path.join(p, 'ansible_collections') for p in
- self._n_collection_paths]
+ interesting_paths = []
+ for p in self._n_collection_paths:
+ if os.path.basename(p) != 'ansible_collections':
+ p = os.path.join(p, 'ansible_collections')
+
+ if p not in interesting_paths:
+ interesting_paths.append(p)
+
interesting_paths.insert(0, self._ansible_pkg_path)
self._n_cached_collection_qualified_paths = interesting_paths
@@ -663,12 +697,11 @@ class AnsibleCollectionRef:
VALID_REF_TYPES = frozenset(to_text(r) for r in ['action', 'become', 'cache', 'callback', 'cliconf', 'connection',
'doc_fragments', 'filter', 'httpapi', 'inventory', 'lookup',
'module_utils', 'modules', 'netconf', 'role', 'shell', 'strategy',
- 'terminal', 'test', 'vars'])
+ 'terminal', 'test', 'vars', 'playbook'])
# FIXME: tighten this up to match Python identifier reqs, etc
- VALID_COLLECTION_NAME_RE = re.compile(to_text(r'^(\w+)\.(\w+)$'))
VALID_SUBDIRS_RE = re.compile(to_text(r'^\w+(\.\w+)*$'))
- VALID_FQCR_RE = re.compile(to_text(r'^\w+\.\w+\.\w+(\.\w+)*$')) # can have 0-N included subdirs as well
+ VALID_FQCR_RE = re.compile(to_text(r'^\w+(\.\w+){2,}$')) # can have 0-N included subdirs as well
def __init__(self, collection_name, subdirs, resource, ref_type):
"""
@@ -708,6 +741,8 @@ class AnsibleCollectionRef:
if self.ref_type == u'role':
package_components.append(u'roles')
+ elif self.ref_type == u'playbook':
+ package_components.append(u'playbooks')
else:
# we assume it's a plugin
package_components += [u'plugins', self.ref_type]
@@ -716,8 +751,8 @@ class AnsibleCollectionRef:
package_components.append(self.subdirs)
fqcr_components.append(self.subdirs)
- if self.ref_type == u'role':
- # roles are their own resource
+ if self.ref_type in (u'role', u'playbook'):
+ # playbooks and roles are their own resource
package_components.append(self.resource)
fqcr_components.append(self.resource)
@@ -751,10 +786,17 @@ class AnsibleCollectionRef:
ref = to_text(ref, errors='strict')
ref_type = to_text(ref_type, errors='strict')
+ ext = ''
- resource_splitname = ref.rsplit(u'.', 1)
- package_remnant = resource_splitname[0]
- resource = resource_splitname[1]
+ if ref_type == u'playbook' and ref.endswith(PB_EXTENSIONS):
+ resource_splitname = ref.rsplit(u'.', 2)
+ package_remnant = resource_splitname[0]
+ resource = resource_splitname[1]
+ ext = '.' + resource_splitname[2]
+ else:
+ resource_splitname = ref.rsplit(u'.', 1)
+ package_remnant = resource_splitname[0]
+ resource = resource_splitname[1]
# split the left two components of the collection package name off, anything remaining is plugin-type
# specific subdirs to be added back on below the plugin type
@@ -766,7 +808,7 @@ class AnsibleCollectionRef:
collection_name = u'.'.join(package_splitname[0:2])
- return AnsibleCollectionRef(collection_name, subdirs, resource, ref_type)
+ return AnsibleCollectionRef(collection_name, subdirs, resource + ext, ref_type)
@staticmethod
def try_parse_fqcr(ref, ref_type):
@@ -826,26 +868,71 @@ class AnsibleCollectionRef:
collection_name = to_text(collection_name)
- return bool(re.match(AnsibleCollectionRef.VALID_COLLECTION_NAME_RE, collection_name))
+ if collection_name.count(u'.') != 1:
+ return False
+
+ return all(
+ # NOTE: keywords and identifiers differ between Python versions
+ not iskeyword(ns_or_name) and is_python_identifier(ns_or_name)
+ for ns_or_name in collection_name.split(u'.')
+ )
+
+
+def _get_collection_playbook_path(playbook):
+
+ acr = AnsibleCollectionRef.try_parse_fqcr(playbook, u'playbook')
+ if acr:
+ try:
+ # get_collection_path
+ pkg = import_module(acr.n_python_collection_package_name)
+ except (IOError, ModuleNotFoundError) as e:
+ # leaving e as debug target, even though not used in normal code
+ pkg = None
+
+ if pkg:
+ cpath = os.path.join(sys.modules[acr.n_python_collection_package_name].__file__.replace('__synthetic__', 'playbooks'))
+
+ if acr.subdirs:
+ paths = [to_native(x) for x in acr.subdirs.split(u'.')]
+ paths.insert(0, cpath)
+ cpath = os.path.join(*paths)
+
+ path = os.path.join(cpath, to_native(acr.resource))
+ if os.path.exists(to_bytes(path)):
+ return acr.resource, path, acr.collection
+ elif not acr.resource.endswith(PB_EXTENSIONS):
+ for ext in PB_EXTENSIONS:
+ path = os.path.join(cpath, to_native(acr.resource + ext))
+ if os.path.exists(to_bytes(path)):
+ return acr.resource, path, acr.collection
+ return None
def _get_collection_role_path(role_name, collection_list=None):
- acr = AnsibleCollectionRef.try_parse_fqcr(role_name, 'role')
+ return _get_collection_resource_path(role_name, u'role', collection_list)
+
+
+def _get_collection_resource_path(name, ref_type, collection_list=None):
+
+ if ref_type == u'playbook':
+ # playbooks are handled a bit differently due to 'extension variance' and the absence of a collection_list
+ return _get_collection_playbook_path(name)
+ acr = AnsibleCollectionRef.try_parse_fqcr(name, ref_type)
if acr:
# looks like a valid qualified collection ref; skip the collection_list
collection_list = [acr.collection]
subdirs = acr.subdirs
resource = acr.resource
elif not collection_list:
- return None # not a FQ role and no collection search list spec'd, nothing to do
+ return None # not a FQ and no collection search list spec'd, nothing to do
else:
- resource = role_name # treat as unqualified, loop through the collection search list to try and resolve
+ resource = name # treat as unqualified, loop through the collection search list to try and resolve
subdirs = ''
for collection_name in collection_list:
try:
- acr = AnsibleCollectionRef(collection_name=collection_name, subdirs=subdirs, resource=resource, ref_type='role')
+ acr = AnsibleCollectionRef(collection_name=collection_name, subdirs=subdirs, resource=resource, ref_type=ref_type)
# FIXME: error handling/logging; need to catch any import failures and move along
pkg = import_module(acr.n_python_package_name)
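To illustrate the stricter validation introduced above (the examples and collection names are mine, not from the patch):

    from ansible.utils.collection_loader import AnsibleCollectionRef

    AnsibleCollectionRef.is_valid_collection_name(u'my_ns.my_col')   # True
    AnsibleCollectionRef.is_valid_collection_name(u'my-ns.my_col')   # False, 'my-ns' is not a Python identifier
    AnsibleCollectionRef.is_valid_collection_name(u'import.my_col')  # False, 'import' is a Python keyword
    AnsibleCollectionRef.is_valid_collection_name(u'a.b.c')          # False, exactly one dot is required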
@@ -854,7 +941,7 @@ def _get_collection_role_path(role_name, collection_list=None):
path = os.path.dirname(to_bytes(sys.modules[acr.n_python_package_name].__file__, errors='surrogate_or_strict'))
return resource, to_text(path, errors='surrogate_or_strict'), collection_name
- except IOError:
+ except (IOError, ModuleNotFoundError) as e:
continue
except Exception as ex:
# FIXME: pick out typical import errors first, then error logging
@@ -872,8 +959,8 @@ def _get_collection_name_from_path(path):
:return: collection name or None
"""
- # FIXME: mess with realpath canonicalization or not?
- path = to_native(path)
+ # ensure we compare full paths since pkg path will be abspath
+ path = to_native(os.path.abspath(to_bytes(path)))
path_parts = path.split('/')
if path_parts.count('ansible_collections') != 1:
@@ -898,6 +985,7 @@ def _get_collection_name_from_path(path):
original_path_prefix = os.path.join('/', *path_parts[0:ac_pos + 3])
+ imported_pkg_path = to_native(os.path.abspath(to_bytes(imported_pkg_path)))
if original_path_prefix != imported_pkg_path:
return None
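A sketch of how the new playbook ref type parses, assuming a hypothetical my_ns.my_col collection; only behaviour visible in the from_fqcr/extension handling above is shown:

    from ansible.utils.collection_loader import AnsibleCollectionRef

    acr = AnsibleCollectionRef.from_fqcr(u'my_ns.my_col.site.yml', u'playbook')
    acr.collection   # 'my_ns.my_col'
    acr.resource     # 'site.yml' - the .yml/.yaml extension stays on the resource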
diff --git a/lib/ansible/utils/color.py b/lib/ansible/utils/color.py
index 8762b44f..be8fb004 100644
--- a/lib/ansible/utils/color.py
+++ b/lib/ansible/utils/color.py
@@ -51,21 +51,6 @@ if C.ANSIBLE_FORCE_COLOR:
# domain - there is no license except that you must leave this header.
#
# Copyright (C) 2008 Brian Nez <thedude at bri1 dot com>
-#
-# http://nezzen.net/2008/06/23/colored-text-in-python-using-ansi-escape-sequences/
-
-codeCodes = {
- 'black': u'0;30', 'bright gray': u'0;37',
- 'blue': u'0;34', 'white': u'1;37',
- 'green': u'0;32', 'bright blue': u'1;34',
- 'cyan': u'0;36', 'bright green': u'1;32',
- 'red': u'0;31', 'bright cyan': u'1;36',
- 'purple': u'0;35', 'bright red': u'1;31',
- 'yellow': u'0;33', 'bright purple': u'1;35',
- 'dark gray': u'1;30', 'bright yellow': u'1;33',
- 'magenta': u'0;35', 'bright magenta': u'1;35',
- 'normal': u'0',
-}
def parsecolor(color):
@@ -74,7 +59,7 @@ def parsecolor(color):
r"|(?P<rgb>rgb(?P<red>[0-5])(?P<green>[0-5])(?P<blue>[0-5]))"
r"|gray(?P<gray>[0-9]+)", color)
if not matches:
- return codeCodes[color]
+ return C.COLOR_CODES[color]
if matches.group('color'):
return u'38;5;%d' % int(matches.group('color'))
if matches.group('rgb'):
diff --git a/lib/ansible/utils/display.py b/lib/ansible/utils/display.py
index ca3dc3c2..95cce2bb 100644
--- a/lib/ansible/utils/display.py
+++ b/lib/ansible/utils/display.py
@@ -18,6 +18,7 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
+import ctypes.util
import errno
import fcntl
import getpass
@@ -36,7 +37,7 @@ from termios import TIOCGWINSZ
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleAssertionError
from ansible.module_utils._text import to_bytes, to_text, to_native
-from ansible.module_utils.six import with_metaclass, string_types
+from ansible.module_utils.six import with_metaclass, text_type
from ansible.utils.color import stringc
from ansible.utils.singleton import Singleton
from ansible.utils.unsafe_proxy import wrap_var
@@ -48,6 +49,100 @@ except NameError:
# Python 3, we already have raw_input
pass
+_LIBC = ctypes.cdll.LoadLibrary(ctypes.util.find_library('c'))
+# Set argtypes, to avoid segfault if the wrong type is provided,
+# restype is assumed to be c_int
+_LIBC.wcwidth.argtypes = (ctypes.c_wchar,)
+_LIBC.wcswidth.argtypes = (ctypes.c_wchar_p, ctypes.c_int)
+# Max for c_int
+_MAX_INT = 2 ** (ctypes.sizeof(ctypes.c_int) * 8 - 1) - 1
+
+_LOCALE_INITIALIZED = False
+_LOCALE_INITIALIZATION_ERR = None
+
+
+def initialize_locale():
+ """Set the locale to the users default setting
+ and set ``_LOCALE_INITIALIZED`` to indicate whether
+ ``get_text_width`` may run into trouble
+ """
+ global _LOCALE_INITIALIZED, _LOCALE_INITIALIZATION_ERR
+ if _LOCALE_INITIALIZED is False:
+ try:
+ locale.setlocale(locale.LC_ALL, '')
+ except locale.Error as e:
+ _LOCALE_INITIALIZATION_ERR = e
+ else:
+ _LOCALE_INITIALIZED = True
+
+
+def get_text_width(text):
+ """Function that utilizes ``wcswidth`` or ``wcwidth`` to determine the
+ number of columns used to display a text string.
+
+ We try ``wcswidth`` first, and fall back to iterating each
+ character and using wcwidth individually, falling back to a value of 0
+ for non-printable wide characters
+
+ On Py2, this depends on ``locale.setlocale(locale.LC_ALL, '')``,
+ which, in the case of Ansible, is done in ``bin/ansible``
+ """
+ if not isinstance(text, text_type):
+ raise TypeError('get_text_width requires text, not %s' % type(text))
+
+ if _LOCALE_INITIALIZATION_ERR:
+ Display().warning(
+ 'An error occurred while calling ansible.utils.display.initialize_locale '
+ '(%s). This may result in incorrectly calculated text widths that can '
+ 'cause Display to print incorrect line lengths' % _LOCALE_INITIALIZATION_ERR
+ )
+ elif not _LOCALE_INITIALIZED:
+ Display().warning(
+ 'ansible.utils.display.initialize_locale has not been called, '
+ 'this may result in incorrectly calculated text widths that can '
+ 'cause Display to print incorrect line lengths'
+ )
+
+ try:
+ width = _LIBC.wcswidth(text, _MAX_INT)
+ except ctypes.ArgumentError:
+ width = -1
+ if width != -1:
+ return width
+
+ width = 0
+ counter = 0
+ for c in text:
+ counter += 1
+ if c in (u'\x08', u'\x7f', u'\x94', u'\x1b'):
+ # A few characters result in a subtraction of length:
+ # BS, DEL, CCH, ESC
+ # ESC is slightly different in that it introduces an escape sequence;
+ # the sequence as a whole is non printable, so it contributes a single
+ # non printable length
+ width -= 1
+ counter -= 1
+ continue
+
+ try:
+ w = _LIBC.wcwidth(c)
+ except ctypes.ArgumentError:
+ w = -1
+ if w == -1:
+ # -1 signifies a non-printable character
+ # use 0 here as a best effort
+ w = 0
+ width += w
+
+ if width == 0 and counter and not _LOCALE_INITIALIZED:
+ raise EnvironmentError(
+ 'ansible.utils.display.initialize_locale has not been called, '
+ 'and get_text_width could not calculate text width of %r' % text
+ )
+
+ # It doesn't make sense to have a negative printable width
+ return width if width >= 0 else 0
+
class FilterBlackList(logging.Filter):
def __init__(self, blacklist):
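A quick sketch of the new width helpers; the exact widths assume a UTF-8 locale where wcwidth reports CJK characters as double width:

    from ansible.utils.display import initialize_locale, get_text_width

    initialize_locale()
    get_text_width(u'ansible')    # 7
    get_text_width(u'コンニチハ')   # 10, five double-width characters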
@@ -131,8 +226,8 @@ class Display(with_metaclass(Singleton, object)):
cmd = subprocess.Popen([self.b_cowsay, "-l"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = cmd.communicate()
self.cows_available = set([to_text(c) for c in out.split()])
- if C.ANSIBLE_COW_WHITELIST and any(C.ANSIBLE_COW_WHITELIST):
- self.cows_available = set(C.ANSIBLE_COW_WHITELIST).intersection(self.cows_available)
+ if C.ANSIBLE_COW_ACCEPTLIST and any(C.ANSIBLE_COW_ACCEPTLIST):
+ self.cows_available = set(C.ANSIBLE_COW_ACCEPTLIST).intersection(self.cows_available)
except Exception:
# could not execute cowsay for some reason
self.b_cowsay = False
@@ -257,7 +352,7 @@ class Display(with_metaclass(Singleton, object)):
msg += '.'
if collection_name == 'ansible.builtin':
- collection_name = 'ansible-base'
+ collection_name = 'ansible-core'
if removed:
header = '[DEPRECATED]: {0}'.format(msg)
@@ -322,6 +417,8 @@ class Display(with_metaclass(Singleton, object)):
'''
Prints a header-looking line with cowsay or stars with length depending on terminal width (3 minimum)
'''
+ msg = to_text(msg)
+
if self.b_cowsay and cows:
try:
self.banner_cowsay(msg)
@@ -330,7 +427,10 @@ class Display(with_metaclass(Singleton, object)):
self.warning("somebody cleverly deleted cowsay or something during the PB run. heh.")
msg = msg.strip()
- star_len = self.columns - len(msg)
+ try:
+ star_len = self.columns - get_text_width(msg)
+ except EnvironmentError:
+ star_len = self.columns - len(msg)
if star_len <= 3:
star_len = 3
stars = u"*" * star_len
diff --git a/lib/ansible/utils/encrypt.py b/lib/ansible/utils/encrypt.py
index 4a35d8cf..a82b1d3e 100644
--- a/lib/ansible/utils/encrypt.py
+++ b/lib/ansible/utils/encrypt.py
@@ -4,9 +4,9 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-import crypt
import multiprocessing
import random
+import re
import string
import sys
@@ -18,15 +18,26 @@ from ansible.module_utils.six import text_type
from ansible.module_utils._text import to_text, to_bytes
from ansible.utils.display import Display
-PASSLIB_AVAILABLE = False
+PASSLIB_E = CRYPT_E = None
+HAS_CRYPT = PASSLIB_AVAILABLE = False
try:
import passlib
import passlib.hash
from passlib.utils.handlers import HasRawSalt
-
+ try:
+ from passlib.utils.binary import bcrypt64
+ except ImportError:
+ from passlib.utils import bcrypt64
PASSLIB_AVAILABLE = True
-except Exception:
- pass
+except Exception as e:
+ PASSLIB_E = e
+
+try:
+ import crypt
+ HAS_CRYPT = True
+except Exception as e:
+ CRYPT_E = e
+
display = Display()
@@ -61,12 +72,12 @@ def random_salt(length=8):
class BaseHash(object):
- algo = namedtuple('algo', ['crypt_id', 'salt_size', 'implicit_rounds'])
+ algo = namedtuple('algo', ['crypt_id', 'salt_size', 'implicit_rounds', 'salt_exact'])
algorithms = {
- 'md5_crypt': algo(crypt_id='1', salt_size=8, implicit_rounds=None),
- 'bcrypt': algo(crypt_id='2a', salt_size=22, implicit_rounds=None),
- 'sha256_crypt': algo(crypt_id='5', salt_size=16, implicit_rounds=5000),
- 'sha512_crypt': algo(crypt_id='6', salt_size=16, implicit_rounds=5000),
+ 'md5_crypt': algo(crypt_id='1', salt_size=8, implicit_rounds=None, salt_exact=False),
+ 'bcrypt': algo(crypt_id='2a', salt_size=22, implicit_rounds=None, salt_exact=True),
+ 'sha256_crypt': algo(crypt_id='5', salt_size=16, implicit_rounds=5000, salt_exact=False),
+ 'sha512_crypt': algo(crypt_id='6', salt_size=16, implicit_rounds=5000, salt_exact=False),
}
def __init__(self, algorithm):
@@ -77,6 +88,9 @@ class CryptHash(BaseHash):
def __init__(self, algorithm):
super(CryptHash, self).__init__(algorithm)
+ if not HAS_CRYPT:
+ raise AnsibleError("crypt.crypt cannot be used as the 'crypt' python library is not installed or is unusable.", orig_exc=CRYPT_E)
+
if sys.platform.startswith('darwin'):
raise AnsibleError("crypt.crypt not supported on Mac OS X/Darwin, install passlib python module")
@@ -91,7 +105,14 @@ class CryptHash(BaseHash):
def _salt(self, salt, salt_size):
salt_size = salt_size or self.algo_data.salt_size
- return salt or random_salt(salt_size)
+ ret = salt or random_salt(salt_size)
+ if re.search(r'[^./0-9A-Za-z]', ret):
+ raise AnsibleError("invalid characters in salt")
+ if self.algo_data.salt_exact and len(ret) != self.algo_data.salt_size:
+ raise AnsibleError("invalid salt size")
+ elif not self.algo_data.salt_exact and len(ret) > self.algo_data.salt_size:
+ raise AnsibleError("invalid salt size")
+ return ret
def _rounds(self, rounds):
if rounds == self.algo_data.implicit_rounds:
@@ -106,13 +127,23 @@ class CryptHash(BaseHash):
saltstring = "$%s$%s" % (self.algo_data.crypt_id, salt)
else:
saltstring = "$%s$rounds=%d$%s" % (self.algo_data.crypt_id, rounds, salt)
- result = crypt.crypt(secret, saltstring)
- # crypt.crypt returns None if it cannot parse saltstring
+ # crypt.crypt on Python < 3.9 returns None if it cannot parse saltstring
+ # On Python >= 3.9, it throws OSError.
+ try:
+ result = crypt.crypt(secret, saltstring)
+ orig_exc = None
+ except OSError as e:
+ result = None
+ orig_exc = e
+
 # A None result would be interpreted by some modules (e.g. the user module)
# as no password at all.
if not result:
- raise AnsibleError("crypt.crypt does not support '%s' algorithm" % self.algorithm)
+ raise AnsibleError(
+ "crypt.crypt does not support '%s' algorithm" % self.algorithm,
+ orig_exc=orig_exc,
+ )
return result
@@ -122,7 +153,7 @@ class PasslibHash(BaseHash):
super(PasslibHash, self).__init__(algorithm)
if not PASSLIB_AVAILABLE:
- raise AnsibleError("passlib must be installed to hash with '%s'" % algorithm)
+ raise AnsibleError("passlib must be installed and usable to hash with '%s'" % algorithm, orig_exc=PASSLIB_E)
try:
self.crypt_algo = getattr(passlib.hash, algorithm)
@@ -138,9 +169,15 @@ class PasslibHash(BaseHash):
if not salt:
return None
elif issubclass(self.crypt_algo, HasRawSalt):
- return to_bytes(salt, encoding='ascii', errors='strict')
+ ret = to_bytes(salt, encoding='ascii', errors='strict')
else:
- return to_text(salt, encoding='ascii', errors='strict')
+ ret = to_text(salt, encoding='ascii', errors='strict')
+
+ # Ensure the salt has the correct padding
+ if self.algorithm == 'bcrypt':
+ ret = bcrypt64.repair_unused(ret)
+
+ return ret
def _clean_rounds(self, rounds):
algo_data = self.algorithms.get(self.algorithm)
@@ -189,8 +226,10 @@ class PasslibHash(BaseHash):
def passlib_or_crypt(secret, algorithm, salt=None, salt_size=None, rounds=None):
if PASSLIB_AVAILABLE:
return PasslibHash(algorithm).hash(secret, salt=salt, salt_size=salt_size, rounds=rounds)
- else:
+ elif HAS_CRYPT:
return CryptHash(algorithm).hash(secret, salt=salt, salt_size=salt_size, rounds=rounds)
+ else:
+ raise AnsibleError("Unable to encrypt nor hash, either crypt or passlib must be installed.", orig_exc=CRYPT_E)
def do_encrypt(result, encrypt, salt_size=None, salt=None):
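A usage sketch for the stricter salt handling; the salt value is illustrative and the hash shown is only the general shape of the output:

    from ansible.utils.encrypt import passlib_or_crypt

    # a 16-character salt drawn from ./0-9A-Za-z, as the crypt backend's
    # _salt() now enforces for sha512_crypt
    passlib_or_crypt('pa$$word', 'sha512_crypt', salt='0123456789abcdef', rounds=5000)
    # -> a '$6$...' sha512-crypt hash string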
diff --git a/lib/ansible/utils/listify.py b/lib/ansible/utils/listify.py
index 709eae5f..89a6583c 100644
--- a/lib/ansible/utils/listify.py
+++ b/lib/ansible/utils/listify.py
@@ -21,7 +21,6 @@ __metaclass__ = type
from ansible.module_utils.six import string_types
from ansible.module_utils.common._collections_compat import Iterable
-from ansible.template.safe_eval import safe_eval
__all__ = ['listify_lookup_plugin_terms']
diff --git a/lib/ansible/utils/lock.py b/lib/ansible/utils/lock.py
new file mode 100644
index 00000000..34387dc5
--- /dev/null
+++ b/lib/ansible/utils/lock.py
@@ -0,0 +1,43 @@
+# Copyright (c) 2020 Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from functools import wraps
+
+
+def lock_decorator(attr='missing_lock_attr', lock=None):
+ '''This decorator is a generic implementation that allows you
+ to either use a pre-defined instance attribute as the location
+ of the lock, or to explicitly pass a lock object.
+
+ This code was implemented with ``threading.Lock`` in mind, but
+ may work with other locks, assuming that they function as
+ context managers.
+
+ When using ``attr``, the assumption is that the first argument to
+ the wrapped method is ``self`` or ``cls``.
+
+ Examples:
+
+ @lock_decorator(attr='_callback_lock')
+ def send_callback(...):
+
+ @lock_decorator(lock=threading.Lock())
+ def some_method(...):
+ '''
+ def outer(func):
+ @wraps(func)
+ def inner(*args, **kwargs):
+ # Python2 doesn't have ``nonlocal``
+ # assign the actual lock to ``_lock``
+ if lock is None:
+ _lock = getattr(args[0], attr)
+ else:
+ _lock = lock
+ with _lock:
+ return func(*args, **kwargs)
+ return inner
+ return outer
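Expanding the docstring examples into a self-contained sketch (the class name is made up):

    import threading

    from ansible.utils.lock import lock_decorator


    class CallbackSender:
        def __init__(self):
            self._callback_lock = threading.Lock()

        @lock_decorator(attr='_callback_lock')
        def send_callback(self, event):
            # the decorator acquires self._callback_lock around this body
            return event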
diff --git a/lib/ansible/utils/native_jinja.py b/lib/ansible/utils/native_jinja.py
new file mode 100644
index 00000000..53ef1400
--- /dev/null
+++ b/lib/ansible/utils/native_jinja.py
@@ -0,0 +1,13 @@
+# Copyright: (c) 2020, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ansible.module_utils.six import text_type
+
+
+class NativeJinjaText(text_type):
+ pass
diff --git a/lib/ansible/utils/plugin_docs.py b/lib/ansible/utils/plugin_docs.py
index 6522f76e..7dca58e4 100644
--- a/lib/ansible/utils/plugin_docs.py
+++ b/lib/ansible/utils/plugin_docs.py
@@ -18,7 +18,7 @@ display = Display()
# modules that are ok that they do not have documentation strings
-BLACKLIST = {
+REJECTLIST = {
'MODULE': frozenset(('async_wrapper',)),
'CACHE': frozenset(('base',)),
}
diff --git a/lib/ansible/utils/ssh_functions.py b/lib/ansible/utils/ssh_functions.py
index 11ab7e13..ec8984d5 100644
--- a/lib/ansible/utils/ssh_functions.py
+++ b/lib/ansible/utils/ssh_functions.py
@@ -51,6 +51,7 @@ def check_for_controlpersist(ssh_executable):
return has_cp
+# TODO: move to 'smart' connection plugin that subclasses to ssh/paramiko as needed.
def set_default_transport():
# deal with 'smart' connection .. one time ..
@@ -59,7 +60,7 @@ def set_default_transport():
# not be as common anymore.
# see if SSH can support ControlPersist if not use paramiko
- if not check_for_controlpersist(C.ANSIBLE_SSH_EXECUTABLE) and paramiko is not None:
+ if not check_for_controlpersist('ssh') and paramiko is not None:
C.DEFAULT_TRANSPORT = "paramiko"
else:
C.DEFAULT_TRANSPORT = "ssh"
diff --git a/lib/ansible/utils/unsafe_proxy.py b/lib/ansible/utils/unsafe_proxy.py
index 8c5d2261..7ed3cffc 100644
--- a/lib/ansible/utils/unsafe_proxy.py
+++ b/lib/ansible/utils/unsafe_proxy.py
@@ -57,6 +57,7 @@ from ansible.module_utils._text import to_bytes, to_text
from ansible.module_utils.common._collections_compat import Mapping, Set
from ansible.module_utils.common.collections import is_sequence
from ansible.module_utils.six import string_types, binary_type, text_type
+from ansible.utils.native_jinja import NativeJinjaText
__all__ = ['AnsibleUnsafe', 'wrap_var']
@@ -78,6 +79,10 @@ class AnsibleUnsafeText(text_type, AnsibleUnsafe):
return AnsibleUnsafeBytes(super(AnsibleUnsafeText, self).encode(*args, **kwargs))
+class NativeJinjaUnsafeText(NativeJinjaText, AnsibleUnsafeText):
+ pass
+
+
class UnsafeProxy(object):
def __new__(cls, obj, *args, **kwargs):
from ansible.utils.display import Display
@@ -123,6 +128,8 @@ def wrap_var(v):
v = _wrap_set(v)
elif is_sequence(v):
v = _wrap_sequence(v)
+ elif isinstance(v, NativeJinjaText):
+ v = NativeJinjaUnsafeText(v)
elif isinstance(v, binary_type):
v = AnsibleUnsafeBytes(v)
elif isinstance(v, text_type):
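A short sketch of the new dispatch; it only uses classes defined in this patch:

    from ansible.utils.native_jinja import NativeJinjaText
    from ansible.utils.unsafe_proxy import AnsibleUnsafeText, wrap_var

    wrapped = wrap_var(NativeJinjaText(u'[1, 2, 3]'))
    isinstance(wrapped, NativeJinjaText)    # True - the native-type marker is preserved
    isinstance(wrapped, AnsibleUnsafeText)  # True - still tainted as unsafe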
diff --git a/lib/ansible/vars/manager.py b/lib/ansible/vars/manager.py
index b690260c..2c22ad93 100644
--- a/lib/ansible/vars/manager.py
+++ b/lib/ansible/vars/manager.py
@@ -328,6 +328,9 @@ class VariableManager:
for vars_file_item in vars_files:
# create a set of temporary vars here, which incorporate the extra
# and magic vars so we can properly template the vars_files entries
+ # NOTE: this makes them depend on host vars/facts so things like
+ # ansible_facts['os_distribution'] can be used, as with include_vars.
+ # Consider DEPRECATING this in the future, since we have include_vars ...
temp_vars = combine_vars(all_vars, self._extra_vars)
temp_vars = combine_vars(temp_vars, magic_variables)
templar = Templar(loader=self._loader, variables=temp_vars)
@@ -363,9 +366,9 @@ class VariableManager:
except AnsibleParserError:
raise
else:
- # if include_delegate_to is set to False, we ignore the missing
- # vars file here because we're working on a delegated host
- if include_delegate_to:
+ # if include_delegate_to is set to False or we don't have a host, ignore the missing
+ # vars file: we're either working on a delegated host or the file requires host vars (see NOTE above)
+ if include_delegate_to and host:
raise AnsibleFileNotFound("vars file %s was not found" % vars_file_item)
except (UndefinedError, AnsibleUndefinedVariable):
if host is not None and self._fact_cache.get(host.name, dict()).get('module_setup') and task is not None:
@@ -426,16 +429,16 @@ class VariableManager:
if task:
all_vars['environment'] = task.environment
- # if we have a task and we're delegating to another host, figure out the
- # variables for that host now so we don't have to rely on hostvars later
- if task and task.delegate_to is not None and include_delegate_to:
- all_vars['ansible_delegated_vars'], all_vars['_ansible_loop_cache'] = self._get_delegated_vars(play, task, all_vars)
-
# 'vars' magic var
if task or play:
# has to be copy, otherwise recursive ref
all_vars['vars'] = all_vars.copy()
+ # if we have a host and task and we're delegating to another host,
+ # figure out the variables for that host now so we don't have to rely on host vars later
+ if task and host and task.delegate_to is not None and include_delegate_to:
+ all_vars['ansible_delegated_vars'], all_vars['_ansible_loop_cache'] = self._get_delegated_vars(play, task, all_vars)
+
display.debug("done with get_vars()")
if C.DEFAULT_DEBUG:
# Use VarsWithSources wrapper class to display var sources
@@ -443,8 +446,7 @@ class VariableManager:
else:
return all_vars
- def _get_magic_variables(self, play, host, task, include_hostvars, include_delegate_to,
- _hosts=None, _hosts_all=None):
+ def _get_magic_variables(self, play, host, task, include_hostvars, include_delegate_to, _hosts=None, _hosts_all=None):
'''
Returns a dictionary of so-called "magic" variables in Ansible,
which are special variables we set internally for use.
@@ -517,6 +519,12 @@ class VariableManager:
return variables
def _get_delegated_vars(self, play, task, existing_variables):
+ # This method has a lot of code copied from ``TaskExecutor._get_loop_items``
+ # if this is failing, and ``TaskExecutor._get_loop_items`` is not
+ # then more will have to be copied here.
+ # TODO: dedupe code here and with ``TaskExecutor._get_loop_items``
+ # this may be possible once we move pre-processing pre fork
+
if not hasattr(task, 'loop'):
# This "task" is not a Task, so we need to skip it
return {}, None
@@ -525,16 +533,41 @@ class VariableManager:
# as we're fetching vars before post_validate has been called on
# the task that has been passed in
vars_copy = existing_variables.copy()
+
+ # get search path for this task to pass to lookup plugins
+ vars_copy['ansible_search_path'] = task.get_search_path()
+
+ # ensure basedir is always in (dwim already searches here but we need to display it)
+ if self._loader.get_basedir() not in vars_copy['ansible_search_path']:
+ vars_copy['ansible_search_path'].append(self._loader.get_basedir())
+
templar = Templar(loader=self._loader, variables=vars_copy)
items = []
has_loop = True
if task.loop_with is not None:
if task.loop_with in lookup_loader:
+ fail = True
+ if task.loop_with == 'first_found':
+ # first_found loops are special. If the item is undefined then we want to fall through to the next
+ fail = False
try:
loop_terms = listify_lookup_plugin_terms(terms=task.loop, templar=templar,
- loader=self._loader, fail_on_undefined=True, convert_bare=False)
- items = wrap_var(lookup_loader.get(task.loop_with, loader=self._loader, templar=templar).run(terms=loop_terms, variables=vars_copy))
+ loader=self._loader, fail_on_undefined=fail, convert_bare=False)
+
+ if not fail:
+ loop_terms = [t for t in loop_terms if not templar.is_template(t)]
+
+ mylookup = lookup_loader.get(task.loop_with, loader=self._loader, templar=templar)
+
+ # give lookup task 'context' for subdir (mostly needed for first_found)
+ for subdir in ['template', 'var', 'file']: # TODO: move this to constants?
+ if subdir in task.action:
+ break
+ setattr(mylookup, '_subdir', subdir + 's')
+
+ items = wrap_var(mylookup.run(terms=loop_terms, variables=vars_copy))
+
except AnsibleTemplateError:
# This task will be skipped later due to this, so we just setup
# a dummy array for the later code so it doesn't fail
diff --git a/lib/ansible/vars/reserved.py b/lib/ansible/vars/reserved.py
index d7daea07..2a2ec8de 100644
--- a/lib/ansible/vars/reserved.py
+++ b/lib/ansible/vars/reserved.py
@@ -65,12 +65,17 @@ def get_reserved_names(include_private=True):
return result
-def warn_if_reserved(myvars):
+def warn_if_reserved(myvars, additional=None):
''' this function warns if any variable passed conflicts with internally reserved names '''
+ if additional is None:
+ reserved = _RESERVED_NAMES
+ else:
+ reserved = _RESERVED_NAMES.union(additional)
+
varnames = set(myvars)
varnames.discard('vars') # we add this one internally, so safe to ignore
- for varname in varnames.intersection(_RESERVED_NAMES):
+ for varname in varnames.intersection(reserved):
display.warning('Found variable using reserved name: %s' % varname)
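Finally, a sketch of the extended signature; the variable names are invented:

    from ansible.vars.reserved import warn_if_reserved

    # 'my_internal_var' is flagged only because it is passed via `additional`
    warn_if_reserved({'ok_var': 1, 'my_internal_var': 2}, additional={'my_internal_var'})
    # -> warning: Found variable using reserved name: my_internal_var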