author    Lee Garrett <lgarrett@rocketjump.eu>    2023-01-31 00:05:27 +0100
committer    Lee Garrett <lgarrett@rocketjump.eu>    2023-01-31 00:05:27 +0100
commit    520506f035967306d0548a8f69a0fea3181dca35 (patch)
tree    6bc71745aeee6208f8382bd067b6c9e1c9a02e2a
parent    46bbbf9f8e527b7ab4329a0aa16e3d38bfbb0c13 (diff)
download    debian-ansible-core-520506f035967306d0548a8f69a0fea3181dca35.zip
New upstream version 2.14.2
-rw-r--r--  PKG-INFO  3
-rwxr-xr-x  bin/ansible-galaxy  25
-rwxr-xr-x  bin/ansible-inventory  17
-rw-r--r--  changelogs/CHANGELOG-v2.14.rst  81
-rw-r--r--  changelogs/changelog.yaml  176
-rw-r--r--  docs/docsite/Makefile  2
-rw-r--r--  docs/docsite/ansible_2_10.inv  bin 239714 -> 0 bytes
-rw-r--r--  docs/docsite/ansible_2_5.inv  bin 123432 -> 0 bytes
-rw-r--r--  docs/docsite/ansible_2_6.inv  bin 134346 -> 0 bytes
-rw-r--r--  docs/docsite/ansible_2_7.inv  bin 149642 -> 0 bytes
-rw-r--r--  docs/docsite/ansible_2_8.inv  bin 198655 -> 0 bytes
-rw-r--r--  docs/docsite/ansible_3.inv  bin 273672 -> 0 bytes
-rw-r--r--  docs/docsite/ansible_4.inv  bin 2101614 -> 0 bytes
-rw-r--r--  docs/docsite/ansible_5.inv  bin 2277393 -> 0 bytes
-rw-r--r--  docs/docsite/ansible_6.inv  bin 2172658 -> 2525471 bytes
-rw-r--r--  docs/docsite/ansible_7.inv  bin 0 -> 2609198 bytes
-rw-r--r--  docs/docsite/jinja2.inv  bin 3584 -> 3594 bytes
-rw-r--r--  docs/docsite/python3.inv  bin 125392 -> 129639 bytes
-rw-r--r--  docs/docsite/rst/community/collection_development_process.rst  11
-rw-r--r--  docs/docsite/rst/community/communication.rst  9
-rw-r--r--  docs/docsite/rst/community/contributor_path.rst  4
-rw-r--r--  docs/docsite/rst/community/create_pr_quick_start.rst  10
-rw-r--r--  docs/docsite/rst/dev_guide/ansible_index.rst  1
-rw-r--r--  docs/docsite/rst/dev_guide/core_index.rst  1
-rw-r--r--  docs/docsite/rst/dev_guide/developing_collections_testing.rst  12
-rw-r--r--  docs/docsite/rst/dev_guide/developing_locally.rst  15
-rw-r--r--  docs/docsite/rst/dev_guide/developing_modules_documenting.rst  11
-rw-r--r--  docs/docsite/rst/dev_guide/developing_plugins.rst  12
-rw-r--r--  docs/docsite/rst/dev_guide/sidecar.rst  100
-rw-r--r--  docs/docsite/rst/dev_guide/testing.rst  3
-rw-r--r--  docs/docsite/rst/dev_guide/testing/sanity/compile.rst  30
-rw-r--r--  docs/docsite/rst/dev_guide/testing_compile.rst  70
-rw-r--r--  docs/docsite/rst/dev_guide/testing_documentation.rst  14
-rw-r--r--  docs/docsite/rst/dev_guide/testing_integration.rst  7
-rw-r--r--  docs/docsite/rst/dev_guide/testing_integration_legacy.rst  114
-rw-r--r--  docs/docsite/rst/dev_guide/testing_running_locally.rst  357
-rw-r--r--  docs/docsite/rst/getting_started/get_started_inventory.rst  2
-rw-r--r--  docs/docsite/rst/getting_started/index.rst  2
-rw-r--r--  docs/docsite/rst/installation_guide/installation_distros.rst  2
-rw-r--r--  docs/docsite/rst/inventory_guide/intro_dynamic_inventory.rst  2
-rw-r--r--  docs/docsite/rst/inventory_guide/intro_inventory.rst  10
-rw-r--r--  docs/docsite/rst/os_guide/windows_faq.rst  2
-rw-r--r--  docs/docsite/rst/playbook_guide/complex_data_manipulation.rst  2
-rw-r--r--  docs/docsite/rst/playbook_guide/playbooks_blocks.rst  2
-rw-r--r--  docs/docsite/rst/playbook_guide/playbooks_filters.rst  2
-rw-r--r--  docs/docsite/rst/playbook_guide/playbooks_variables.rst  4
-rw-r--r--  docs/docsite/rst/plugins/callback.rst  21
-rw-r--r--  docs/docsite/rst/porting_guides/porting_guide_6.rst  58
-rw-r--r--  docs/docsite/rst/porting_guides/porting_guide_7.rst  51
-rw-r--r--  docs/docsite/rst/reference_appendices/release_and_maintenance.rst  215
-rw-r--r--  docs/docsite/rst/scenario_guides/vmware_rest_scenarios/authentication.rst  2
-rw-r--r--  docs/docsite/sphinx_conf/all_conf.py  11
-rw-r--r--  docs/docsite/sphinx_conf/ansible_conf.py  9
-rw-r--r--  docs/docsite/sphinx_conf/core_conf.py  9
-rw-r--r--  docs/docsite/sphinx_conf/core_lang_conf.py  9
-rw-r--r--  docs/man/man1/ansible-config.1  2
-rw-r--r--  docs/man/man1/ansible-console.1  4
-rw-r--r--  docs/man/man1/ansible-doc.1  8
-rw-r--r--  docs/man/man1/ansible-galaxy.1  9
-rw-r--r--  docs/man/man1/ansible-inventory.1  2
-rw-r--r--  docs/man/man1/ansible-playbook.1  4
-rw-r--r--  docs/man/man1/ansible-pull.1  4
-rw-r--r--  docs/man/man1/ansible-vault.1  2
-rw-r--r--  docs/man/man1/ansible.1  6
-rwxr-xr-x  lib/ansible/cli/galaxy.py  25
-rwxr-xr-x  lib/ansible/cli/inventory.py  17
-rw-r--r--  lib/ansible/executor/play_iterator.py  2
-rw-r--r--  lib/ansible/module_utils/ansible_release.py  2
-rw-r--r--  lib/ansible/module_utils/common/arg_spec.py  47
-rw-r--r--  lib/ansible/module_utils/common/parameters.py  55
-rw-r--r--  lib/ansible/module_utils/errors.py  4
-rw-r--r--  lib/ansible/modules/command.py  2
-rw-r--r--  lib/ansible/modules/file.py  58
-rw-r--r--  lib/ansible/modules/get_url.py  2
-rw-r--r--  lib/ansible/modules/setup.py  2
-rw-r--r--  lib/ansible/modules/systemd.py  10
-rw-r--r--  lib/ansible/modules/systemd_service.py  10
-rw-r--r--  lib/ansible/modules/uri.py  6
-rw-r--r--  lib/ansible/plugins/action/template.py  6
-rw-r--r--  lib/ansible/plugins/action/validate_argument_spec.py  2
-rw-r--r--  lib/ansible/plugins/connection/local.py  10
-rw-r--r--  lib/ansible/plugins/doc_fragments/validate.py  4
-rw-r--r--  lib/ansible/plugins/filter/product.yml  2
-rw-r--r--  lib/ansible/plugins/filter/regex_replace.yml  2
-rw-r--r--  lib/ansible/plugins/filter/regex_search.yml  2
-rw-r--r--  lib/ansible/plugins/list.py  5
-rw-r--r--  lib/ansible/plugins/loader.py  3
-rw-r--r--  lib/ansible/plugins/strategy/__init__.py  4
-rw-r--r--  lib/ansible/release.py  2
-rw-r--r--  lib/ansible_core.egg-info/PKG-INFO  3
-rw-r--r--  lib/ansible_core.egg-info/SOURCES.txt  32
-rw-r--r--  setup.cfg  1
-rw-r--r--  test/integration/targets/ansible-test-container/aliases  5
-rwxr-xr-x  test/integration/targets/ansible-test-container/runme.py  1090
-rwxr-xr-x  test/integration/targets/ansible-test-container/runme.sh  5
-rw-r--r--  test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/col/plugins/modules/invalid_yaml_syntax.py  27
-rw-r--r--  test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/failure/README.rst  3
-rw-r--r--  test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/failure/galaxy.yml  6
-rw-r--r--  test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/failure/meta/main.yml  1
-rw-r--r--  test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/failure/plugins/modules/failure_ps.ps1  16
-rw-r--r--  test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/failure/plugins/modules/failure_ps.yml  31
-rw-r--r--  test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/ps_only/plugins/module_utils/share_module.psm1  19
-rw-r--r--  test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/ps_only/plugins/modules/in_function.ps1  7
-rw-r--r--  test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/ps_only/plugins/modules/in_function.yml  25
-rw-r--r--  test/integration/targets/ansible-test-sanity-validate-modules/expected.txt  5
-rwxr-xr-x  test/integration/targets/ansible-test-sanity-validate-modules/runme.sh  19
-rw-r--r--  test/integration/targets/argspec/library/argspec.py  104
-rw-r--r--  test/integration/targets/argspec/tasks/main.yml  211
-rw-r--r--  test/integration/targets/blocks/79711.yml  17
-rwxr-xr-x  test/integration/targets/blocks/runme.sh  9
-rw-r--r--  test/integration/targets/file/tasks/main.yml  76
-rw-r--r--  test/integration/targets/get_url/tasks/ciphers.yml  2
-rw-r--r--  test/integration/targets/inventory_script/inventory.json  4
-rw-r--r--  test/integration/targets/reboot/aliases  7
-rw-r--r--  test/integration/targets/reboot/tasks/main.yml  68
-rw-r--r--  test/integration/targets/roles_arg_spec/test_complex_role_fails.yml  27
-rw-r--r--  test/integration/targets/setup_epel/tasks/main.yml  5
-rw-r--r--  test/integration/targets/var_templating/ansible_debug_template.j2  1
-rwxr-xr-x  test/integration/targets/var_templating/runme.sh  3
-rw-r--r--  test/integration/targets/var_templating/test_vars_with_sources.yml  9
-rw-r--r--  test/integration/targets/yum/tasks/yuminstallroot.yml  9
-rw-r--r--  test/lib/ansible_test/_data/completion/docker.txt  16
-rw-r--r--  test/lib/ansible_test/_internal/__init__.py  9
-rw-r--r--  test/lib/ansible_test/_internal/ansible_util.py  5
-rw-r--r--  test/lib/ansible_test/_internal/cgroup.py  110
-rw-r--r--  test/lib/ansible_test/_internal/cli/argparsing/parsers.py  23
-rw-r--r--  test/lib/ansible_test/_internal/cli/environments.py  20
-rw-r--r--  test/lib/ansible_test/_internal/cli/parsers/key_value_parsers.py  10
-rw-r--r--  test/lib/ansible_test/_internal/commands/env/__init__.py  23
-rw-r--r--  test/lib/ansible_test/_internal/commands/integration/__init__.py  22
-rw-r--r--  test/lib/ansible_test/_internal/commands/shell/__init__.py  19
-rw-r--r--  test/lib/ansible_test/_internal/completion.py  43
-rw-r--r--  test/lib/ansible_test/_internal/config.py  3
-rw-r--r--  test/lib/ansible_test/_internal/connections.py  8
-rw-r--r--  test/lib/ansible_test/_internal/constants.py  2
-rw-r--r--  test/lib/ansible_test/_internal/containers.py  146
-rw-r--r--  test/lib/ansible_test/_internal/coverage_util.py  5
-rw-r--r--  test/lib/ansible_test/_internal/delegation.py  12
-rw-r--r--  test/lib/ansible_test/_internal/dev/__init__.py  2
-rw-r--r--  test/lib/ansible_test/_internal/dev/container_probe.py  210
-rw-r--r--  test/lib/ansible_test/_internal/docker_util.py  785
-rw-r--r--  test/lib/ansible_test/_internal/host_configs.py  10
-rw-r--r--  test/lib/ansible_test/_internal/host_profiles.py  684
-rw-r--r--  test/lib/ansible_test/_internal/inventory.py  5
-rw-r--r--  test/lib/ansible_test/_internal/provisioning.py  23
-rw-r--r--  test/lib/ansible_test/_internal/ssh.py  51
-rw-r--r--  test/lib/ansible_test/_internal/target.py  2
-rw-r--r--  test/lib/ansible_test/_internal/thread.py  23
-rw-r--r--  test/lib/ansible_test/_internal/util.py  17
-rw-r--r--  test/lib/ansible_test/_util/controller/sanity/pylint/config/ansible-test.cfg  1
-rw-r--r--  test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/ps_argspec.ps1  14
-rw-r--r--  test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/utils.py  4
-rw-r--r--  test/lib/ansible_test/_util/target/setup/bootstrap.sh  3
-rw-r--r--  test/lib/ansible_test/_util/target/setup/check_systemd_cgroup_v1.sh  17
-rw-r--r--  test/lib/ansible_test/_util/target/setup/probe_cgroups.py  31
-rw-r--r--  test/units/galaxy/test_collection.py  98
-rw-r--r--  test/units/module_utils/common/arg_spec/test_aliases.py  7
-rw-r--r--  test/units/module_utils/common/arg_spec/test_module_validate.py  2
158 files changed, 5299 insertions, 838 deletions
diff --git a/PKG-INFO b/PKG-INFO
index 373b0ae4..b3beba17 100644
--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,6 +1,6 @@
Metadata-Version: 2.1
Name: ansible-core
-Version: 2.14.1
+Version: 2.14.2
Summary: Radically simple IT automation
Home-page: https://ansible.com/
Author: Ansible, Inc.
@@ -29,6 +29,7 @@ Classifier: Topic :: System :: Installation/Setup
Classifier: Topic :: System :: Systems Administration
Classifier: Topic :: Utilities
Requires-Python: >=3.9
+Description-Content-Type: text/x-rst
License-File: COPYING
|PyPI version| |Docs badge| |Chat badge| |Build Status| |Code Of Conduct| |Mailing Lists| |License| |CII Best Practices|
diff --git a/bin/ansible-galaxy b/bin/ansible-galaxy
index f4148d92..3cb7fe2c 100755
--- a/bin/ansible-galaxy
+++ b/bin/ansible-galaxy
@@ -79,15 +79,11 @@ SERVER_DEF = [
# config definition fields
SERVER_ADDITIONAL = {
'v3': {'default': 'False'},
- 'validate_certs': {'default': True, 'cli': [{'name': 'validate_certs'}]},
+ 'validate_certs': {'cli': [{'name': 'validate_certs'}]},
'timeout': {'default': '60', 'cli': [{'name': 'timeout'}]},
'token': {'default': None},
}
-# override default if the generic is set
-if C.GALAXY_IGNORE_CERTS is not None:
- SERVER_ADDITIONAL['validate_certs'].update({'default': not C.GALAXY_IGNORE_CERTS})
-
def with_collection_artifacts_manager(wrapped_method):
"""Inject an artifacts manager if not passed explicitly.
@@ -100,7 +96,8 @@ def with_collection_artifacts_manager(wrapped_method):
if 'artifacts_manager' in kwargs:
return wrapped_method(*args, **kwargs)
- artifacts_manager_kwargs = {'validate_certs': context.CLIARGS['validate_certs']}
+ # FIXME: use validate_certs context from Galaxy servers when downloading collections
+ artifacts_manager_kwargs = {'validate_certs': context.CLIARGS['resolved_validate_certs']}
keyring = context.CLIARGS.get('keyring', None)
if keyring is not None:
@@ -197,7 +194,11 @@ class RoleDistributionServer:
class GalaxyCLI(CLI):
- '''command to manage Ansible roles in shared repositories, the default of which is Ansible Galaxy *https://galaxy.ansible.com*.'''
+ '''Command to manage Ansible roles and collections.
+
+ None of the CLI tools are designed to run concurrently with themselves.
+ Use an external scheduler and/or locking to ensure there are no clashing operations.
+ '''
name = 'ansible-galaxy'
@@ -584,6 +585,8 @@ class GalaxyCLI(CLI):
# ensure we have 'usable' cli option
setattr(options, 'validate_certs', (None if options.ignore_certs is None else not options.ignore_certs))
+ # the default if validate_certs is None
+ setattr(options, 'resolved_validate_certs', (options.validate_certs if options.validate_certs is not None else not C.GALAXY_IGNORE_CERTS))
display.verbosity = options.verbosity
return options
@@ -641,6 +644,8 @@ class GalaxyCLI(CLI):
token_val = server_options['token'] or NoTokenSentinel
username = server_options['username']
v3 = server_options.pop('v3')
+ if server_options['validate_certs'] is None:
+ server_options['validate_certs'] = context.CLIARGS['resolved_validate_certs']
validate_certs = server_options['validate_certs']
if v3:
@@ -676,9 +681,7 @@ class GalaxyCLI(CLI):
cmd_server = context.CLIARGS['api_server']
cmd_token = GalaxyToken(token=context.CLIARGS['api_key'])
- # resolve validate_certs
- v_config_default = True if C.GALAXY_IGNORE_CERTS is None else not C.GALAXY_IGNORE_CERTS
- validate_certs = v_config_default if context.CLIARGS['validate_certs'] is None else context.CLIARGS['validate_certs']
+ validate_certs = context.CLIARGS['resolved_validate_certs']
if cmd_server:
# Cmd args take precedence over the config entry, but first check if the arg was a name and use that config
# entry, otherwise create a new API entry for the server specified.
@@ -844,7 +847,7 @@ class GalaxyCLI(CLI):
name=coll_req['name'],
),
coll_req['source'],
- validate_certs=not context.CLIARGS['ignore_certs'],
+ validate_certs=context.CLIARGS['resolved_validate_certs'],
),
)
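
The hunks above replace two ad-hoc certificate-checking decisions with a single ``resolved_validate_certs`` value. A minimal sketch of the precedence it implements — an explicit CLI value first, then the inverted global ``GALAXY_IGNORE_CERTS`` setting, with a per-server ``validate_certs`` still able to override — using plain stand-in arguments rather than ansible-core's real ``context.CLIARGS`` and ``C`` objects:

.. code-block:: python

    def resolve_validate_certs(cli_value, server_value, galaxy_ignore_certs):
        """Sketch of the precedence in the diff above; the arguments are stand-ins."""
        # the CLI value (--ignore-certs / validate_certs) wins when given (not None)
        resolved = cli_value if cli_value is not None else not galaxy_ignore_certs
        # an explicit per-server setting overrides the resolved global value
        return server_value if server_value is not None else resolved

    # GALAXY_IGNORE_CERTS=True with nothing set elsewhere: certs are not validated
    assert resolve_validate_certs(None, None, True) is False
    # an explicit CLI value beats the global setting
    assert resolve_validate_certs(True, None, True) is True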
diff --git a/bin/ansible-inventory b/bin/ansible-inventory
index 0e860801..e8ed75e4 100755
--- a/bin/ansible-inventory
+++ b/bin/ansible-inventory
@@ -13,7 +13,6 @@ from ansible.cli import CLI
import sys
import argparse
-from operator import attrgetter
from ansible import constants as C
from ansible import context
@@ -273,11 +272,11 @@ class InventoryCLI(CLI):
result = [self._graph_name('@%s:' % group.name, depth)]
depth = depth + 1
- for kid in sorted(group.child_groups, key=attrgetter('name')):
+ for kid in group.child_groups:
result.extend(self._graph_group(kid, depth))
if group.name != 'all':
- for host in sorted(group.hosts, key=attrgetter('name')):
+ for host in group.hosts:
result.append(self._graph_name(host.name, depth))
if context.CLIARGS['show_vars']:
result.extend(self._show_vars(self._get_host_variables(host), depth + 1))
@@ -303,9 +302,9 @@ class InventoryCLI(CLI):
results = {}
results[group.name] = {}
if group.name != 'all':
- results[group.name]['hosts'] = [h.name for h in sorted(group.hosts, key=attrgetter('name'))]
+ results[group.name]['hosts'] = [h.name for h in group.hosts]
results[group.name]['children'] = []
- for subgroup in sorted(group.child_groups, key=attrgetter('name')):
+ for subgroup in group.child_groups:
results[group.name]['children'].append(subgroup.name)
if subgroup.name not in seen:
results.update(format_group(subgroup))
@@ -343,14 +342,14 @@ class InventoryCLI(CLI):
# subgroups
results[group.name]['children'] = {}
- for subgroup in sorted(group.child_groups, key=attrgetter('name')):
+ for subgroup in group.child_groups:
if subgroup.name != 'all':
results[group.name]['children'].update(format_group(subgroup))
# hosts for group
results[group.name]['hosts'] = {}
if group.name != 'all':
- for h in sorted(group.hosts, key=attrgetter('name')):
+ for h in group.hosts:
myvars = {}
if h.name not in seen: # avoid defining host vars more than once
seen.append(h.name)
@@ -377,7 +376,7 @@ class InventoryCLI(CLI):
results[group.name] = {}
results[group.name]['children'] = []
- for subgroup in sorted(group.child_groups, key=attrgetter('name')):
+ for subgroup in group.child_groups:
if subgroup.name == 'ungrouped' and not has_ungrouped:
continue
if group.name != 'all':
@@ -385,7 +384,7 @@ class InventoryCLI(CLI):
results.update(format_group(subgroup))
if group.name != 'all':
- for host in sorted(group.hosts, key=attrgetter('name')):
+ for host in group.hosts:
if host.name not in seen:
seen.add(host.name)
host_vars = self._get_host_variables(host=host)
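
All of the hunks above make the same change: ``ansible-inventory`` stops force-sorting groups and hosts and instead emits them in the order the inventory sources defined them. A tiny illustration of the behavioral difference, independent of ansible's own classes:

.. code-block:: python

    from collections import namedtuple
    from operator import attrgetter

    Host = namedtuple('Host', 'name')
    hosts = [Host('web2'), Host('db1'), Host('web1')]  # order from the inventory source

    # old behavior: always alphabetical
    assert [h.name for h in sorted(hosts, key=attrgetter('name'))] == ['db1', 'web1', 'web2']
    # new behavior: input order preserved
    assert [h.name for h in hosts] == ['web2', 'db1', 'web1']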
diff --git a/changelogs/CHANGELOG-v2.14.rst b/changelogs/CHANGELOG-v2.14.rst
index 59056fc5..16bc0ac0 100644
--- a/changelogs/CHANGELOG-v2.14.rst
+++ b/changelogs/CHANGELOG-v2.14.rst
@@ -5,6 +5,87 @@ ansible-core 2.14 "C'mon Everybody" Release Notes
.. contents:: Topics
+v2.14.2
+=======
+
+Release Summary
+---------------
+
+| Release Date: 2023-01-30
+| `Porting Guide <https://docs.ansible.com/ansible/devel/porting_guides.html>`__
+
+
+Major Changes
+-------------
+
+- ansible-test - Docker Desktop on WSL2 is now supported (additional configuration required).
+- ansible-test - Docker and Podman are now supported on hosts with cgroup v2 unified. Previously only cgroup v1 and cgroup v2 hybrid were supported.
+- ansible-test - Podman now works on container hosts without systemd. Previously only some containers worked, while others worked only with rootful or only with rootless Podman, not both. Some containers did not work at all.
+- ansible-test - Podman on WSL2 is now supported.
+- ansible-test - When additional cgroup setup is required on the container host, this will be automatically detected. Instructions on how to configure the host will be provided in the error message shown.
+
+Minor Changes
+-------------
+
+- ansible-test - A new ``audit`` option is available when running custom containers. This option can be used to indicate whether a container requires the AUDIT_WRITE capability. The default is ``required``, which most containers will need when using Podman. If necessary, the ``none`` option can be used to opt-out of the capability. This has no effect on Docker, which always provides the capability.
+- ansible-test - A new ``cgroup`` option is available when running custom containers. This option can be used to indicate a container requires cgroup v1 or that it does not use cgroup. The default behavior assumes the container works with cgroup v2 (as well as v1).
+- ansible-test - Additional log details are shown when containers fail to start or SSH connections to containers fail.
+- ansible-test - Connection failures to remote provisioned hosts now show failure details as a warning.
+- ansible-test - Containers included with ansible-test no longer disable seccomp by default.
+- ansible-test - Failure to connect to a container over SSH now results in a clear error. Previously tests would be attempted even after initial connection attempts failed.
+- ansible-test - Integration tests can be excluded from retries triggered by the ``--retry-on-error`` option by adding the ``retry/never`` alias. This is useful for tests that cannot pass on a retry or are too slow to make retries useful.
+- ansible-test - More details are provided about an instance when provisioning fails.
+- ansible-test - Reduce the polling limit for SSHD startup in containers from 60 retries to 10. The one second delay between retries remains in place.
+- ansible-test - SSH connections from OpenSSH 8.8+ to CentOS 6 containers now work without additional configuration. However, clients older than OpenSSH 7.0 can no longer connect to CentOS 6 containers as a result. The container must have ``centos6`` in the image name for this work-around to be applied.
+- ansible-test - SSH shell connections from OpenSSH 8.8+ to ansible-test provisioned network instances now work without additional configuration. However, clients older than OpenSSH 7.0 can no longer open shell sessions for ansible-test provisioned network instances as a result.
+- ansible-test - The ``ansible-test env`` command now detects and reports the container ID if running in a container.
+- ansible-test - Unit tests now support network disconnect by default when running under Podman. Previously this feature only worked by default under Docker.
+- ansible-test - Use ``stop --time 0`` followed by ``rm`` to remove ephemeral containers instead of ``rm -f``. This speeds up teardown of ephemeral containers.
+- ansible-test - Warnings are now shown when using containers that were built with VOLUME instructions.
+- ansible-test - When setting the max open files for containers, the container host's limit will be checked. If the host limit is lower than the preferred value, it will be used and a warning will be shown.
+- ansible-test - When using Podman, ansible-test will detect if the loginuid used in containers is incorrect. When this occurs a warning is displayed and the container is run with the AUDIT_CONTROL capability. Previously containers would fail under this situation, with no useful warnings or errors given.
+
+Bugfixes
+--------
+
+- Correctly count rescued tasks in play recap (https://github.com/ansible/ansible/issues/79711)
+- Fix traceback when using the ``template`` module and running with ``ANSIBLE_DEBUG=1`` (https://github.com/ansible/ansible/issues/79763)
+- Fix using ``GALAXY_IGNORE_CERTS`` in conjunction with collections in requirements files which specify a specific ``source`` that isn't in the configured servers.
+- Fix using ``GALAXY_IGNORE_CERTS`` when downloading tarballs from Galaxy servers (https://github.com/ansible/ansible/issues/79557).
+- Module and role argument validation - include the valid suboption choices in the error when an invalid suboption is provided.
+- ansible-doc will now correctly display short descriptions when listing filters/tests, regardless of directory sorting.
+- ansible-inventory will not explicitly sort groups/hosts anymore, giving a chance (depending on output format) to match the order in the input sources.
+- ansible-test - Added a work-around for a traceback under Python 3.11 when completing certain command line options.
+- ansible-test - Avoid using ``exec`` after container startup when possible. This improves container startup performance and avoids intermittent startup issues with some old containers.
+- ansible-test - Connection attempts to managed remote instances no longer abort on ``Permission denied`` errors.
+- ansible-test - Detection for running in a Podman or Docker container has been fixed to detect more scenarios. The new detection relies on ``/proc/self/mountinfo`` instead of ``/proc/self/cpuset``. Detection now works with custom cgroups and private cgroup namespaces.
+- ansible-test - Fix validate-modules error when the PowerShell argspec is retrieved inside a Cmdlet
+- ansible-test - Handle server errors when executing the ``docker info`` command.
+- ansible-test - Multiple containers now work under Podman without specifying the ``--docker-network`` option.
+- ansible-test - Pass the ``XDG_RUNTIME_DIR`` environment variable through to container commands.
+- ansible-test - Perform PyPI proxy configuration after instances are ready and bootstrapping has been completed. Only target instances are affected, as controller instances were already handled this way. This avoids proxy configuration errors when target instances are not yet ready for use.
+- ansible-test - Prevent concurrent / repeat inspections of the same container image.
+- ansible-test - Prevent concurrent / repeat pulls of the same container image.
+- ansible-test - Prevent concurrent execution of cached methods.
+- ansible-test - Show the exception type when reporting errors during instance provisioning.
+- ansible-test sanity - correctly report invalid YAML in validate-modules (https://github.com/ansible/ansible/issues/75837).
+- argument spec validation - again report deprecated parameters for Python-based modules. This was accidentally removed in ansible-core 2.11 when argument spec validation was refactored (https://github.com/ansible/ansible/issues/79680, https://github.com/ansible/ansible/pull/79681).
+- argument spec validation - ensure that deprecated aliases in suboptions are also reported (https://github.com/ansible/ansible/pull/79740).
+- argument spec validation - fix warning message when two aliases of the same option are used for suboptions to also mention the option's name they are in (https://github.com/ansible/ansible/pull/79740).
+- connection local now avoids a traceback when an invalid user is used to execute ansible (valid on the host, but not in the container).
+- file - touch action in check mode was always returning ok. Fix now evaluates the different conditions and returns the appropriate changed status. (https://github.com/ansible/ansible/issues/79360)
+- get_url - Ensure we are passing ciphers to all url_get calls (https://github.com/ansible/ansible/issues/79717)
+- plugin filter now works with rejectlist as documented (still falls back to blacklist if used).
+- uri - improve JSON content type detection
+
+Known Issues
+------------
+
+- ansible-test - Additional configuration may be required for certain container host and container combinations. Further details are available in the testing documentation.
+- ansible-test - Custom containers with ``VOLUME`` instructions may be unable to start, when previously the containers started correctly. Remove the ``VOLUME`` instructions to resolve the issue. Containers with this condition will cause ``ansible-test`` to emit a warning.
+- ansible-test - Systems with Podman networking issues may be unable to run containers, when previously the issue went unreported. Correct the networking issues to continue using ``ansible-test`` with Podman.
+- ansible-test - Using Docker on systems with SELinux may require setting SELinux to permissive mode. Podman should work with SELinux in enforcing mode.
+
v2.14.1
=======
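
Two of the bugfixes above (79681, 79740) restore and extend deprecation reporting in argument spec validation, including deprecated aliases nested in suboptions. A hedged sketch of the kind of spec affected, using ``ArgumentSpecValidator`` from ``lib/ansible/module_utils/common/arg_spec.py`` (one of the files changed in this release); the option and collection names here are invented for illustration:

.. code-block:: python

    from ansible.module_utils.common.arg_spec import ArgumentSpecValidator

    # hypothetical spec: the suboption 'name' has a deprecated alias 'label'
    arg_spec = {
        'config': {
            'type': 'dict',
            'options': {
                'name': {
                    'type': 'str',
                    'aliases': ['label'],
                    'deprecated_aliases': [
                        {'name': 'label', 'version': '3.0.0', 'collection_name': 'ns.col'},
                    ],
                },
            },
        },
    }

    # with this release, using the nested alias is expected to surface a
    # deprecation in the validation result instead of being silently ignored
    result = ArgumentSpecValidator(arg_spec).validate({'config': {'label': 'demo'}})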
diff --git a/changelogs/changelog.yaml b/changelogs/changelog.yaml
index e2fd0ea0..e6c58239 100644
--- a/changelogs/changelog.yaml
+++ b/changelogs/changelog.yaml
@@ -885,3 +885,179 @@ releases:
- mention_acl.yml
- v2.14.1rc1_summary.yaml
release_date: '2022-11-28'
+ 2.14.2:
+ changes:
+ bugfixes:
+ - Fix traceback when using the ``template`` module and running with ``ANSIBLE_DEBUG=1``
+ (https://github.com/ansible/ansible/issues/79763)
+ release_summary: '| Release Date: 2023-01-30
+
+ | `Porting Guide <https://docs.ansible.com/ansible/devel/porting_guides.html>`__
+
+ '
+ codename: C'mon Everybody
+ fragments:
+ - 79763-ansible_debug_template_tb_fix.yml
+ - v2.14.2_summary.yaml
+ release_date: '2023-01-30'
+ 2.14.2rc1:
+ changes:
+ bugfixes:
+ - Correctly count rescued tasks in play recap (https://github.com/ansible/ansible/issues/79711)
+ - Fix using ``GALAXY_IGNORE_CERTS`` in conjunction with collections in requirements
+ files which specify a specific ``source`` that isn't in the configured servers.
+ - Fix using ``GALAXY_IGNORE_CERTS`` when downloading tarballs from Galaxy servers
+ (https://github.com/ansible/ansible/issues/79557).
+ - Module and role argument validation - include the valid suboption choices
+ in the error when an invalid suboption is provided.
+ - ansible-doc will now correctly display short descriptions when listing filters/tests,
+ regardless of directory sorting.
+ - ansible-inventory will not explicitly sort groups/hosts anymore, giving a
+ chance (depending on output format) to match the order in the input sources.
+ - ansible-test - Added a work-around for a traceback under Python 3.11 when
+ completing certain command line options.
+ - ansible-test - Avoid using ``exec`` after container startup when possible.
+ This improves container startup performance and avoids intermittent startup
+ issues with some old containers.
+ - ansible-test - Connection attempts to managed remote instances no longer abort
+ on ``Permission denied`` errors.
+ - ansible-test - Detection for running in a Podman or Docker container has been
+ fixed to detect more scenarios. The new detection relies on ``/proc/self/mountinfo``
+ instead of ``/proc/self/cpuset``. Detection now works with custom cgroups
+ and private cgroup namespaces.
+ - ansible-test - Fix validate-modules error when the PowerShell argspec is retrieved
+ inside a Cmdlet
+ - ansible-test - Handle server errors when executing the ``docker info`` command.
+ - ansible-test - Multiple containers now work under Podman without specifying
+ the ``--docker-network`` option.
+ - ansible-test - Pass the ``XDG_RUNTIME_DIR`` environment variable through to
+ container commands.
+ - ansible-test - Perform PyPI proxy configuration after instances are ready
+ and bootstrapping has been completed. Only target instances are affected,
+ as controller instances were already handled this way. This avoids proxy configuration
+ errors when target instances are not yet ready for use.
+ - ansible-test - Prevent concurrent / repeat inspections of the same container
+ image.
+ - ansible-test - Prevent concurrent / repeat pulls of the same container image.
+ - ansible-test - Prevent concurrent execution of cached methods.
+ - ansible-test - Show the exception type when reporting errors during instance
+ provisioning.
+ - ansible-test sanity - correctly report invalid YAML in validate-modules (https://github.com/ansible/ansible/issues/75837).
+ - argument spec validation - again report deprecated parameters for Python-based
+ modules. This was accidentally removed in ansible-core 2.11 when argument
+ spec validation was refactored (https://github.com/ansible/ansible/issues/79680,
+ https://github.com/ansible/ansible/pull/79681).
+ - argument spec validation - ensure that deprecated aliases in suboptions are
+ also reported (https://github.com/ansible/ansible/pull/79740).
+ - argument spec validation - fix warning message when two aliases of the same
+ option are used for suboptions to also mention the option's name they are
+ in (https://github.com/ansible/ansible/pull/79740).
+ - connection local now avoids a traceback when an invalid user is used to execute
+ ansible (valid on the host, but not in the container).
+ - file - touch action in check mode was always returning ok. Fix now evaluates
+ the different conditions and returns the appropriate changed status. (https://github.com/ansible/ansible/issues/79360)
+ - get_url - Ensure we are passing ciphers to all url_get calls (https://github.com/ansible/ansible/issues/79717)
+ - plugin filter now works with rejectlist as documented (still falls back to
+ blacklist if used).
+ - uri - improve JSON content type detection
+ known_issues:
+ - ansible-test - Additional configuration may be required for certain container
+ host and container combinations. Further details are available in the testing
+ documentation.
+ - ansible-test - Custom containers with ``VOLUME`` instructions may be unable
+ to start, when previously the containers started correctly. Remove the ``VOLUME``
+ instructions to resolve the issue. Containers with this condition will cause
+ ``ansible-test`` to emit a warning.
+ - ansible-test - Systems with Podman networking issues may be unable to run
+ containers, when previously the issue went unreported. Correct the networking
+ issues to continue using ``ansible-test`` with Podman.
+ - ansible-test - Using Docker on systems with SELinux may require setting SELinux
+ to permissive mode. Podman should work with SELinux in enforcing mode.
+ major_changes:
+ - ansible-test - Docker Desktop on WSL2 is now supported (additional configuration
+ required).
+ - ansible-test - Docker and Podman are now supported on hosts with cgroup v2
+ unified. Previously only cgroup v1 and cgroup v2 hybrid were supported.
+ - ansible-test - Podman now works on container hosts without systemd. Previously
+ only some containers worked, while others worked only with rootful or only with
+ rootless Podman, not both. Some containers did not work at all.
+ - ansible-test - Podman on WSL2 is now supported.
+ - ansible-test - When additional cgroup setup is required on the container host,
+ this will be automatically detected. Instructions on how to configure the
+ host will be provided in the error message shown.
+ minor_changes:
+ - ansible-test - A new ``audit`` option is available when running custom containers.
+ This option can be used to indicate whether a container requires the AUDIT_WRITE
+ capability. The default is ``required``, which most containers will need when
+ using Podman. If necessary, the ``none`` option can be used to opt-out of
+ the capability. This has no effect on Docker, which always provides the capability.
+ - ansible-test - A new ``cgroup`` option is available when running custom containers.
+ This option can be used to indicate a container requires cgroup v1 or that
+ it does not use cgroup. The default behavior assumes the container works with
+ cgroup v2 (as well as v1).
+ - ansible-test - Additional log details are shown when containers fail to start
+ or SSH connections to containers fail.
+ - ansible-test - Connection failures to remote provisioned hosts now show failure
+ details as a warning.
+ - ansible-test - Containers included with ansible-test no longer disable seccomp
+ by default.
+ - ansible-test - Failure to connect to a container over SSH now results in a
+ clear error. Previously tests would be attempted even after initial connection
+ attempts failed.
+ - ansible-test - Integration tests can be excluded from retries triggered by
+ the ``--retry-on-error`` option by adding the ``retry/never`` alias. This
+ is useful for tests that cannot pass on a retry or are too slow to make retries
+ useful.
+ - ansible-test - More details are provided about an instance when provisioning
+ fails.
+ - ansible-test - Reduce the polling limit for SSHD startup in containers from
+ 60 retries to 10. The one second delay between retries remains in place.
+ - ansible-test - SSH connections from OpenSSH 8.8+ to CentOS 6 containers now
+ work without additional configuration. However, clients older than OpenSSH
+ 7.0 can no longer connect to CentOS 6 containers as a result. The container
+ must have ``centos6`` in the image name for this work-around to be applied.
+ - ansible-test - SSH shell connections from OpenSSH 8.8+ to ansible-test provisioned
+ network instances now work without additional configuration. However, clients
+ older than OpenSSH 7.0 can no longer open shell sessions for ansible-test
+ provisioned network instances as a result.
+ - ansible-test - The ``ansible-test env`` command now detects and reports the
+ container ID if running in a container.
+ - ansible-test - Unit tests now support network disconnect by default when running
+ under Podman. Previously this feature only worked by default under Docker.
+ - ansible-test - Use ``stop --time 0`` followed by ``rm`` to remove ephemeral
+ containers instead of ``rm -f``. This speeds up teardown of ephemeral containers.
+ - ansible-test - Warnings are now shown when using containers that were built
+ with VOLUME instructions.
+ - ansible-test - When setting the max open files for containers, the container
+ host's limit will be checked. If the host limit is lower than the preferred
+ value, it will be used and a warning will be shown.
+ - ansible-test - When using Podman, ansible-test will detect if the loginuid
+ used in containers is incorrect. When this occurs a warning is displayed and
+ the container is run with the AUDIT_CONTROL capability. Previously containers
+ would fail under this situation, with no useful warnings or errors given.
+ release_summary: '| Release Date: 2023-01-23
+
+ | `Porting Guide <https://docs.ansible.com/ansible/devel/porting_guides.html>`__
+
+ '
+ codename: C'mon Everybody
+ fragments:
+ - 75837-validate-modules-invalid-yaml.yml
+ - 76578-fix-role-argspec-suboptions-error.yml
+ - 79525-fix-file-touch-check-mode-status.yaml
+ - 79561-fix-a-g-global-ignore-certs-cfg.yml
+ - 79681-argspec-param-deprecation.yml
+ - 79711-fix-play-stats-rescued.yml
+ - 79717-get-url-ciphers.yml
+ - 79740-aliases-warnings-deprecations-in-suboptions.yml
+ - adoc_fix_list.yml
+ - ansible-test-container-management.yml
+ - ansible-test-fix-python-3.11-traceback.yml
+ - ansible-test-pypi-proxy-fix.yml
+ - better-maybe-json-uri.yml
+ - local_bad_user.yml
+ - rejectlist_fix.yml
+ - unsorted.yml
+ - v2.14.2rc1_summary.yaml
+ - validate-module-ps-cmdlet.yml
+ release_date: '2023-01-23'
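
Since ``changelogs/changelog.yaml`` is machine-readable, entries like the ones above can be inspected programmatically; a small sketch (assuming PyYAML is available):

.. code-block:: python

    import yaml

    with open('changelogs/changelog.yaml') as f:
        changelog = yaml.safe_load(f)

    # e.g. prints "2.14.2 2023-01-30" and "2.14.2rc1 2023-01-23" among others
    for version, release in changelog['releases'].items():
        print(version, release.get('release_date'))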
diff --git a/docs/docsite/Makefile b/docs/docsite/Makefile
index 4e9cf272..f8e87590 100644
--- a/docs/docsite/Makefile
+++ b/docs/docsite/Makefile
@@ -82,7 +82,7 @@ gettext_generate_rst: collections_meta config cli keywords testing
ansible_structure:
# We must have python and python-packaging for the version_helper
# script so use it for version comparison
- if python -c "import sys, packaging.version as p; sys.exit(not p.Version('$(MAJOR_VERSION)') > p.Version('2.10'))" ; then \
+ if $(PYTHON) -c "import sys, packaging.version as p; sys.exit(not p.Version('$(MAJOR_VERSION)') > p.Version('2.10'))" ; then \
echo "Creating symlinks in ansible_structure"; \
ln -sf ../rst/ansible_index.rst rst/index.rst; \
ln -sf ../dev_guide/ansible_index.rst rst/dev_guide/index.rst; \
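
The Makefile fix only swaps the hard-coded ``python`` for ``$(PYTHON)``; the inline script itself leans on ``packaging.version`` because version strings must not be compared lexicographically, as this standalone check shows:

.. code-block:: python

    from packaging.version import Version

    print(Version('2.14') > Version('2.10'))  # True: numeric comparison
    print('2.14' > '2.10')                    # also True, but only by accident
    print(Version('2.9') > Version('2.10'))   # False
    print('2.9' > '2.10')                     # True: string comparison gets this wrong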
diff --git a/docs/docsite/ansible_2_10.inv b/docs/docsite/ansible_2_10.inv
deleted file mode 100644
index e646e94f..00000000
--- a/docs/docsite/ansible_2_10.inv
+++ /dev/null
Binary files differ
diff --git a/docs/docsite/ansible_2_5.inv b/docs/docsite/ansible_2_5.inv
deleted file mode 100644
index 05e5a2b0..00000000
--- a/docs/docsite/ansible_2_5.inv
+++ /dev/null
Binary files differ
diff --git a/docs/docsite/ansible_2_6.inv b/docs/docsite/ansible_2_6.inv
deleted file mode 100644
index b84a2661..00000000
--- a/docs/docsite/ansible_2_6.inv
+++ /dev/null
Binary files differ
diff --git a/docs/docsite/ansible_2_7.inv b/docs/docsite/ansible_2_7.inv
deleted file mode 100644
index 81cea2cb..00000000
--- a/docs/docsite/ansible_2_7.inv
+++ /dev/null
Binary files differ
diff --git a/docs/docsite/ansible_2_8.inv b/docs/docsite/ansible_2_8.inv
deleted file mode 100644
index 90486f6e..00000000
--- a/docs/docsite/ansible_2_8.inv
+++ /dev/null
Binary files differ
diff --git a/docs/docsite/ansible_3.inv b/docs/docsite/ansible_3.inv
deleted file mode 100644
index 99cd3592..00000000
--- a/docs/docsite/ansible_3.inv
+++ /dev/null
Binary files differ
diff --git a/docs/docsite/ansible_4.inv b/docs/docsite/ansible_4.inv
deleted file mode 100644
index d8e20b9e..00000000
--- a/docs/docsite/ansible_4.inv
+++ /dev/null
Binary files differ
diff --git a/docs/docsite/ansible_5.inv b/docs/docsite/ansible_5.inv
deleted file mode 100644
index 4213ef2c..00000000
--- a/docs/docsite/ansible_5.inv
+++ /dev/null
Binary files differ
diff --git a/docs/docsite/ansible_6.inv b/docs/docsite/ansible_6.inv
index ce737534..a03b2b28 100644
--- a/docs/docsite/ansible_6.inv
+++ b/docs/docsite/ansible_6.inv
Binary files differ
diff --git a/docs/docsite/ansible_7.inv b/docs/docsite/ansible_7.inv
new file mode 100644
index 00000000..4e9556f0
--- /dev/null
+++ b/docs/docsite/ansible_7.inv
Binary files differ
diff --git a/docs/docsite/jinja2.inv b/docs/docsite/jinja2.inv
index af1885e7..37ed7968 100644
--- a/docs/docsite/jinja2.inv
+++ b/docs/docsite/jinja2.inv
Binary files differ
diff --git a/docs/docsite/python3.inv b/docs/docsite/python3.inv
index ac234241..d7ab8c26 100644
--- a/docs/docsite/python3.inv
+++ b/docs/docsite/python3.inv
Binary files differ
diff --git a/docs/docsite/rst/community/collection_development_process.rst b/docs/docsite/rst/community/collection_development_process.rst
index 2da39649..3c2f609c 100644
--- a/docs/docsite/rst/community/collection_development_process.rst
+++ b/docs/docsite/rst/community/collection_development_process.rst
@@ -151,10 +151,19 @@ A single changelog fragment may contain multiple sections but most will only con
- idrac_user - module may error out with the message ``unable to perform the import or export operation`` because there are pending attribute changes or a configuration job is in progress. Wait for the job to complete and run the task again.(https://github.com/dell/dellemc-openmanage-ansible-modules/pull/303).
+**trivial**
+ Changes where a formal release changelog entry isn't required. ``trivial`` changelog fragments are excluded from the published changelog output and may be used for changes such as housekeeping, documentation and test only changes.
+ You can use ``trivial`` for collections that require a changelog fragment for each pull request.
+
+ .. code-block:: yaml
+
+ trivial:
+ - aws_ec2 - fix broken integration test (https://github.com/ansible-collections/amazon.aws/pull/1269).
+
Each changelog entry must contain a link to its issue between parentheses at the end. If there is no corresponding issue, the entry must contain a link to the PR itself.
-Most changelog entries are ``bugfixes`` or ``minor_changes``. You can also use ``trivial`` for any collection that requires a changelog fragment for each pull request. ``trivial`` changelog fragments are excluded from the changelog output.
+Most changelog entries are ``bugfixes`` or ``minor_changes``.
Changelog fragment entry format
diff --git a/docs/docsite/rst/community/communication.rst b/docs/docsite/rst/community/communication.rst
index 87a37e86..1e8bc645 100644
--- a/docs/docsite/rst/community/communication.rst
+++ b/docs/docsite/rst/community/communication.rst
@@ -22,7 +22,6 @@ Your first post to the mailing list will be moderated (to reduce spam), so pleas
* `Ansible Announce list <https://groups.google.com/forum/#!forum/ansible-announce>`_ is a read-only list that shares information about new releases of Ansible, and also rare infrequent event information, such as announcements about an upcoming AnsibleFest, which is our official conference series. Worth subscribing to!
* `Ansible AWX List <https://groups.google.com/forum/#!forum/awx-project>`_ is for `Ansible AWX <https://github.com/ansible/awx>`_
* `Ansible Development List <https://groups.google.com/forum/#!forum/ansible-devel>`_ is for questions about developing Ansible modules (mostly in Python), fixing bugs in the Ansible core code, asking about prospective feature design, or discussions about extending Ansible or features in progress.
-* `Ansible Lockdown List <https://groups.google.com/forum/#!forum/ansible-lockdown>`_ is for all things related to Ansible Lockdown projects, including DISA STIG automation and CIS Benchmarks.
* `Ansible Outreach List <https://groups.google.com/forum/#!forum/ansible-outreach>`_ help with promoting Ansible and `Ansible Meetups <https://www.meetup.com/topics/ansible/>`_
* `Ansible Project List <https://groups.google.com/forum/#!forum/ansible-project>`_ is for sharing Ansible tips, answering questions about playbooks and roles, and general user discussion.
* `Molecule Discussions <https://github.com/ansible-community/molecule/discussions>`_ is designed to aid with the development and testing of Ansible roles with Molecule.
@@ -56,7 +55,11 @@ Matrix chat supports:
* no line limits
* images
-The room links in the :ref:`general_channels` will take you directly to the relevant rooms. For more information, see the community-hosted `Matrix FAQ <https://hackmd.io/@ansible-community/community-matrix-faq>`_.
+The room links in the :ref:`general_channels` or in the :ref:`working_group_list` list will take you directly to the relevant rooms.
+
+If there is no appropriate room for your community, please create it.
+
+For more information, see the community-hosted `Matrix FAQ <https://hackmd.io/@ansible-community/community-matrix-faq>`_.
Ansible community on IRC
------------------------
@@ -104,7 +107,6 @@ Many of our community `Working Groups <https://github.com/ansible/community/wiki
- `AAP Configuration as Code <https://github.com/redhat-cop/controller_configuration/wiki/AAP-Configuration-as-Code>`_ - Matrix: `#aap_config_as_code:ansible.com <https://matrix.to/#/#aap_config_as_code:ansible.com>`_
- `Amazon (AWS) Working Group <https://github.com/ansible/community/wiki/AWS>`_ - Matrix: `#aws:ansible.com <https://matrix.to:/#/#aws:ansible.com>`_ | IRC: ``#ansible-aws``
-- `Ansible Lockdown Working Group <https://github.com/ansible/community/wiki/Lockdown>`_ (`Security playbooks/roles <https://github.com/ansible/ansible-lockdown>`_) - Matrix: `#lockdown:ansible.com <https://matrix.to:/#/#lockdown:ansible.com>`_ | IRC: ``#ansible-lockdown``
- `AWX Working Group <https://github.com/ansible/awx>`_ - Matrix: `#awx:ansible.com <https://matrix.to:/#/#awx:ansible.com>`_ | IRC: ``#ansible-awx``
- `Azure Working Group <https://github.com/ansible/community/wiki/Azure>`_ - Matrix: `#azure:ansible.com <https://matrix.to:/#/#azure:ansible.com>`_ | IRC: ``#ansible-azure``
- `Community Working Group <https://github.com/ansible/community/wiki/Community>`_ (including Meetups) - Matrix: `#community:ansible.com <https://matrix.to:/#/#community:ansible.com>`_ | IRC: ``#ansible-community``
@@ -123,6 +125,7 @@ Many of our community `Working Groups <https://github.com/ansible/community/wiki
- `Network Working Group <https://github.com/ansible/community/wiki/Network>`_ - Matrix: `#network:ansible.com <https://matrix.to:/#/#network:ansible.com>`_ | IRC: ``#ansible-network``
- `PostgreSQL Working Group <https://github.com/ansible-collections/community.postgresql/wiki/PostgreSQL-Working-Group>`_ - Matrix: `#postgresql:ansible.com <https://matrix.to:/#/#postgresql:ansible.com>`_
- `Remote Management Working Group <https://github.com/ansible/community/issues/409>`_ - Matrix: `#devel:ansible.com <https://matrix.to:/#/#devel:ansible.com>`_ | IRC: ``#ansible-devel``
+- `Security Automation Working Group <https://github.com/ansible/community/wiki/Security-Automation>`_ - Matrix: `#security-automation:ansible.com <https://matrix.to/#/#security-automation:ansible.com>`_ | IRC: ``#ansible-security``
- `Storage Working Group <https://github.com/ansible/community/wiki/Storage>`_ - Matrix: `#storage:ansible.com <https://matrix.to/#/#storage:ansible.com>`_ | IRC: ``#ansible-storage``
- `Testing Working Group <https://github.com/ansible/community/wiki/Testing>`_ - Matrix: `#devel:ansible.com <https://matrix.to:/#/#devel:ansible.com>`_ | IRC: ``#ansible-devel``
- `VMware Working Group <https://github.com/ansible/community/wiki/VMware>`_ - Matrix: `#vmware:ansible.com <https://matrix.to:/#/#vmware:ansible.com>`_ | IRC: ``#ansible-vmware``
diff --git a/docs/docsite/rst/community/contributor_path.rst b/docs/docsite/rst/community/contributor_path.rst
index 1708db37..de59be65 100644
--- a/docs/docsite/rst/community/contributor_path.rst
+++ b/docs/docsite/rst/community/contributor_path.rst
@@ -81,7 +81,7 @@ Become a collection maintainer
If you are a code contributor to a collection, you can get extended permissions in the repository and become a maintainer. A collection maintainer is a contributor trusted by the community who makes significant and regular contributions to the project and showed themselves as a specialist in the related area. See :ref:`maintainers` for details.
-For some collections that use the `collection bot <https://github.com/ansible-community/collection_bot>`_, such as `community.general <https://github.com/ansible-collections/community.general>`_ and `community.network <https://github.com/ansible-collections/community.network>`_, you can have different levels of access and permissions.
+For some collections that use the `collection bot <https://github.com/ansible-community/collection_bot>`_, such as `community.general <https://github.com/ansible-collections/community.general>`_ and `community.network <https://github.com/ansible-collections/community.network>`_, you can have different levels of access and permissions.
* :ref:`module_maintainers` - The stage prior to becoming a collection maintainer. The file is usually a module or plugin. File maintainers have indirect commit rights.
* supershipit permissions - Similar to being a file maintainer but the scope where a maintainer has the indirect commit is the whole repository.
@@ -103,4 +103,4 @@ To reach the status, as the current Committee members did before getting it, alo
* Subscribe to, comment on, and vote on the `Community Topics <https://github.com/ansible-community/community-topics/issues>`_.
* Propose your topics.
-* If time permits, join the `Community meetings <https://github.com/ansible/community/blob/main/meetings/README.md#schedule>`_ Note this is **NOT** a requirement.
+* If time permits, join the `Community meetings <https://github.com/ansible/community/blob/main/meetings/README.md#schedule>`_. Note this is **NOT** a requirement.
diff --git a/docs/docsite/rst/community/create_pr_quick_start.rst b/docs/docsite/rst/community/create_pr_quick_start.rst
index 083a1f2b..5e4ed023 100644
--- a/docs/docsite/rst/community/create_pr_quick_start.rst
+++ b/docs/docsite/rst/community/create_pr_quick_start.rst
@@ -16,7 +16,7 @@ Prepare your environment
These steps assume a Linux work environment with ``git`` installed.
-1. Install and start ``docker`` or ``podman`` with the ``docker`` executable shim. This insures tests run properly isolated and in the exact environments as in CI. The latest ``ansible-core`` development version also supports the ``podman`` CLI program.
+1. Install and start ``docker`` or ``podman``. This ensures tests run properly isolated and in the same environment as in CI.
2. :ref:`Install Ansible or ansible-core <installation_guide>`. You need the ``ansible-test`` utility which is provided by either of these packages.
@@ -155,11 +155,9 @@ See :ref:`module_contribution` for some general guidelines about Ansible module
Test your changes
=================
- If using the ``docker`` CLI program, the host must be configured to use cgroupsv1 (this is not required for ``podman``). This can be done by adding ``systemd.unified_cgroup_hierarchy=0`` to the kernel boot arguments (requires bootloader config update and reboot).
-
1. Install ``flake8`` (``pip install flake8``, or install the corresponding package on your operating system).
-1. Run ``flake8`` against a changed file:
+2. Run ``flake8`` against a changed file:
.. code-block:: bash
@@ -169,7 +167,7 @@ Test your changes
This shows unused imports, which are not shown by sanity tests, as well as other common issues.
Optionally, you can use the ``--max-line-length=160`` command-line argument.
-2. Run sanity tests:
+3. Run sanity tests:
.. code-block:: bash
@@ -178,7 +176,7 @@ Test your changes
If they failed, look at the output carefully - it is informative and helps to identify a problem line quickly.
Sanity failings usually relate to incorrect code and documentation formatting.
-3. Run integration tests:
+4. Run integration tests:
.. code-block:: bash
diff --git a/docs/docsite/rst/dev_guide/ansible_index.rst b/docs/docsite/rst/dev_guide/ansible_index.rst
index e79069f7..0736df15 100644
--- a/docs/docsite/rst/dev_guide/ansible_index.rst
+++ b/docs/docsite/rst/dev_guide/ansible_index.rst
@@ -75,6 +75,7 @@ If you prefer to read the entire guide, here's a list of the pages in order.
developing_python_3
debugging
developing_modules_documenting
+ adjacent_yaml_doc
developing_modules_general_windows
developing_modules_general_aci
platforms/aws_guidelines
diff --git a/docs/docsite/rst/dev_guide/core_index.rst b/docs/docsite/rst/dev_guide/core_index.rst
index 6c479e97..00a7db63 100644
--- a/docs/docsite/rst/dev_guide/core_index.rst
+++ b/docs/docsite/rst/dev_guide/core_index.rst
@@ -71,6 +71,7 @@ If you prefer to read the entire guide, here's a list of the pages in order.
developing_python_3
debugging
developing_modules_documenting
+ sidecar
developing_modules_general_windows
developing_modules_general_aci
developing_modules_in_groups
diff --git a/docs/docsite/rst/dev_guide/developing_collections_testing.rst b/docs/docsite/rst/dev_guide/developing_collections_testing.rst
index 7f36dcdf..50d55ca5 100644
--- a/docs/docsite/rst/dev_guide/developing_collections_testing.rst
+++ b/docs/docsite/rst/dev_guide/developing_collections_testing.rst
@@ -4,7 +4,7 @@
Testing collections
*******************
-Testing your collection ensures that your code works well and integrates well with the rest of the Ansible ecosystem. Your collection should pass the general compile and sanity tests for Ansible code. You should also add unit tests to cover the code in your collection and integration tests to cover the interactions between your collection and ansible-core.
+Testing your collection ensures that your code works well and integrates well with the rest of the Ansible ecosystem. Your collection should pass the sanity tests for Ansible code. You should also add unit tests to cover the code in your collection and integration tests to cover the interactions between your collection and ansible-core.
.. contents::
:local:
@@ -15,20 +15,20 @@ Testing tools
The main tool for testing collections is ``ansible-test``, Ansible's testing tool described in :ref:`developing_testing` and provided by both the ``ansible`` and ``ansible-core`` packages.
-You can run several compile and sanity checks, as well as run unit and integration tests for plugins using ``ansible-test``. When you test collections, test against the ansible-core version(s) you are targeting.
+You can run several sanity tests, as well as run unit and integration tests for plugins using ``ansible-test``. When you test collections, test against the ansible-core version(s) you are targeting.
You must always execute ``ansible-test`` from the root directory of a collection. You can run ``ansible-test`` in Docker containers without installing any special requirements. The Ansible team uses this approach in Azure Pipelines both in the ansible/ansible GitHub repository and in the large community collections such as `community.general <https://github.com/ansible-collections/community.general/>`_ and `community.network <https://github.com/ansible-collections/community.network/>`_. The examples below demonstrate running tests in Docker containers.
-Compile and sanity tests
-------------------------
+Sanity tests
+------------
-To run all compile and sanity tests:
+To run all sanity tests:
.. code-block:: shell-session
ansible-test sanity --docker default -v
-See :ref:`testing_compile` and :ref:`testing_sanity` for more information. See the :ref:`full list of sanity tests <all_sanity_tests>` for details on the sanity tests and how to fix identified issues.
+See :ref:`testing_sanity` for more information. See the :ref:`full list of sanity tests <all_sanity_tests>` for details on the sanity tests and how to fix identified issues.
Adding unit tests
-----------------
diff --git a/docs/docsite/rst/dev_guide/developing_locally.rst b/docs/docsite/rst/dev_guide/developing_locally.rst
index 338c73b7..2eefbc95 100644
--- a/docs/docsite/rst/dev_guide/developing_locally.rst
+++ b/docs/docsite/rst/dev_guide/developing_locally.rst
@@ -28,8 +28,8 @@ Modules and plugins: what is the difference?
============================================
If you are looking to add functionality to Ansible, you might wonder whether you need a module or a plugin. Here is a quick overview to help you understand what you need:
-* Modules are reusable, standalone scripts that can be used by the Ansible API, the :command:`ansible` command, or the :command:`ansible-playbook` command. Modules provide a defined interface. Each module accepts arguments and returns information to Ansible by printing a JSON string to stdout before exiting. Modules execute on the target system (usually that means on a remote system) in separate processes. Modules are technically plugins, but for historical reasons we do not usually talk about "module plugins".
-* :ref:`Plugins <working_with_plugins>` extend Ansible's core functionality and execute on the control node within the ``/usr/bin/ansible`` process. Plugins offer options and extensions for the core features of Ansible - transforming data, logging output, connecting to inventory, and more.
+* :ref:`Plugins <working_with_plugins>` extend Ansible's core functionality. Most plugin types execute on the control node within the ``/usr/bin/ansible`` process. Plugins offer options and extensions for the core features of Ansible: transforming data, logging output, connecting to inventory, and more.
+* Modules are a type of plugin that execute automation tasks on a 'target' (usually a remote system). Modules work as standalone scripts that Ansible executes in their own process outside of the controller. Modules interface with Ansible mostly via JSON, accepting arguments and returning information by printing a JSON string to stdout before exiting. Unlike the other plugins (which must be written in Python), modules can be written in any language, although Ansible itself provides modules only in Python and PowerShell.
.. _use_collections:
@@ -42,10 +42,10 @@ The rest of this page describes other methods of using local, standalone modules
.. _local_modules:
-Adding a module outside of a collection
-=======================================
+Adding a module or plugin outside of a collection
+==================================================
-You can configure Ansible to load standalone local modules in a specified location or locations and make them available to all playbooks and roles. Alternatively, you can make a non-collection local module available only to specific playbooks or roles.
+You can configure Ansible to load standalone local modules or plugins in specific locations and make them available to all playbooks and roles (using configured paths). Alternatively, you can make a non-collection local module or plugin available only to certain playbooks or roles (via adjacent paths).
Adding standalone local modules for all playbooks and roles
-----------------------------------------------------------
@@ -70,9 +70,10 @@ To confirm that ``my_local_module`` is available:
* type ``ansible localhost -m my_local_module`` to see the output for that module, or
* type ``ansible-doc -t module my_local_module`` to see the documentation for that module
+.. note:: This applies to all plugin types, but each plugin type requires its own configuration and/or adjacent directory; see below.
.. note::
- Currently, the ``ansible-doc`` command can parse module documentation only from modules written in Python. If you have a module written in a programming language other than Python, please write the documentation in a Python file adjacent to the module file.
+ The ``ansible-doc`` command can parse module documentation from modules written in Python or from an adjacent YAML file. If your module is written in a programming language other than Python, write the documentation in a Python or YAML file adjacent to the module file. See :ref:`adjacent_yaml_doc`.
Adding standalone local modules for selected playbooks or a single role
-----------------------------------------------------------------------
@@ -82,6 +83,8 @@ Ansible automatically loads all executable files from certain directories adjace
* To use a standalone module only in a selected playbook or playbooks, store the module in a subdirectory called ``library`` in the directory that contains the playbook or playbooks.
* To use a standalone module only in a single role, store the module in a subdirectory called ``library`` within that role.
+.. note:: This applies to all plugin types, but each plugin type requires its own configuration and/or adjacent directory; see below.
+
.. warning::
Roles contained in collections cannot contain any modules or other plugins. All plugins in a collection must live in the collection ``plugins`` directory tree. All plugins in that tree are accessible to all roles in the collection. If you are developing new modules, we recommend distributing them in :ref:`collections <developing_collections>`, not in roles.
diff --git a/docs/docsite/rst/dev_guide/developing_modules_documenting.rst b/docs/docsite/rst/dev_guide/developing_modules_documenting.rst
index 081f7493..f459e433 100644
--- a/docs/docsite/rst/dev_guide/developing_modules_documenting.rst
+++ b/docs/docsite/rst/dev_guide/developing_modules_documenting.rst
@@ -21,6 +21,10 @@ Every Ansible module written in Python must begin with seven standard sections i
Some older Ansible modules have ``imports`` at the bottom of the file, ``Copyright`` notices with the full GPL prefix, and/or ``DOCUMENTATION`` fields in the wrong order. These are legacy files that need updating - do not copy them into new modules. Over time we are updating and correcting older modules. Please follow the guidelines on this page!
+.. note:: For non-Python modules you still create a ``.py`` file for documentation purposes. Starting with ansible-core 2.14 you can instead choose to create a ``.yml`` file that has the same data structure, but in pure YAML.
+ With YAML files, the examples below are easy to adapt: remove the Python quoting, replace ``=`` with ``:`` (for example, change ``DOCUMENTATION = r''' ... '''`` to ``DOCUMENTATION: ...``), and remove the closing quotes. See :ref:`adjacent_yaml_doc`.
+
+
.. _shebang:
Python shebang & UTF-8 coding
@@ -28,6 +32,10 @@ Python shebang & UTF-8 coding
Begin your Ansible module with ``#!/usr/bin/python`` - this "shebang" allows ``ansible_python_interpreter`` to work. Follow the shebang immediately with ``# -*- coding: utf-8 -*-`` to clarify that the file is UTF-8 encoded.
+.. note:: Using ``#!/usr/bin/env`` makes ``env`` the interpreter and bypasses the ``ansible_<interpreter>_interpreter`` logic.
+.. note:: If you develop the module using a different scripting language, adjust the interpreter accordingly (``#!/usr/bin/<interpreter>``) so ``ansible_<interpreter>_interpreter`` can work for that specific language.
+.. note:: Binary modules do not require a shebang or an interpreter.
+
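+For example, the first two lines of a Python module follow the pattern described above:
+
+.. code-block:: python
+
+    #!/usr/bin/python
+    # -*- coding: utf-8 -*-
+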
.. _copyright:
Copyright and license
@@ -48,7 +56,7 @@ Additions to the module (for instance, rewrites) are not permitted to add additi
.. code-block:: python
# Copyright: Contributors to the Ansible project
-
+
Any legal review will include the source control history, so an exhaustive copyright header is not necessary.
Please do not include a copyright year. If the existing copyright statement includes a year, do not edit the existing copyright year. Any existing copyright header should not be modified without permission from the copyright author.
@@ -82,6 +90,7 @@ Each documentation field is described below. Before committing your module docum
* As long as your module file is :ref:`available locally <local_modules>`, you can use ``ansible-doc -t module my_module_name`` to view your module documentation at the command line. Any parsing errors will be obvious - you can view details by adding ``-vvv`` to the command.
* You should also :ref:`test the HTML output <testing_module_documentation>` of your module documentation.
+
Documentation fields
--------------------
diff --git a/docs/docsite/rst/dev_guide/developing_plugins.rst b/docs/docsite/rst/dev_guide/developing_plugins.rst
index d1f8e82f..341e4fc8 100644
--- a/docs/docsite/rst/dev_guide/developing_plugins.rst
+++ b/docs/docsite/rst/dev_guide/developing_plugins.rst
@@ -81,6 +81,10 @@ Configuration sources follow the precedence rules for values in Ansible. When th
Plugins that support embedded documentation (see :ref:`ansible-doc` for the list) should include well-formed doc strings. If you inherit from a plugin, you must document the options it takes, either through a documentation fragment or as a copy. See :ref:`module_documenting` for more information on correct documentation. Thorough documentation is a good idea even if you're developing a plugin for local use.
+In ansible-core 2.14 we added support for documenting filter and test plugins. You have two options for providing documentation:
+ - Define a Python file that includes inline documentation for each plugin.
+ - Define a Python file for multiple plugins and create adjacent documentation files in YAML format.
+
Developing particular plugin types
==================================
@@ -313,7 +317,7 @@ Filter plugins
Filter plugins manipulate data. They are a feature of Jinja2 and are also available in Jinja2 templates used by the ``template`` module. As with all plugins, they can be easily extended, but instead of having a file for each one you can have several per file. Most of the filter plugins shipped with Ansible reside in a ``core.py``.
-Filter plugins do not use the standard configuration and documentation system described above.
+Filter plugins do not use the standard configuration system described above, but since ansible-core 2.14 they can use it as plain documentation.
Since Ansible evaluates variables only when they are needed, filter plugins should propagate the exceptions ``jinja2.exceptions.UndefinedError`` and ``AnsibleUndefinedVariable`` to ensure undefined variables are only fatal when necessary.
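+
+As an illustration, here is a minimal sketch of a filter plugin file; the filter name ``shout`` is hypothetical:
+
+.. code-block:: python
+
+    def shout(value):
+        # A trivial filter: upper-case the input and add an exclamation mark.
+        return '%s!' % str(value).upper()
+
+
+    class FilterModule(object):
+        # Ansible looks for a FilterModule class whose filters() method
+        # returns a mapping of filter names to callables, so several
+        # filters can live in a single file.
+        def filters(self):
+            return {'shout': shout}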
@@ -356,7 +360,7 @@ Here's a simple lookup plugin implementation --- this lookup returns the content
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
- DOCUMENTATION = """
+ DOCUMENTATION = r"""
name: file
author: Daniel Hokka Zakrisson (@dhozac) <daniel@hozac.com>
version_added: "0.9" # for collections, use the collection version, not the Ansible version
@@ -451,7 +455,7 @@ Test plugins
Test plugins verify data. They are a feature of Jinja2 and are also available in Jinja2 templates used by the ``template`` module. As with all plugins, they can be easily extended, but instead of having a file for each one you can have several per file. Most of the test plugins shipped with Ansible reside in a ``core.py``. These are especially useful in conjunction with some filter plugins like ``map`` and ``select``; they are also available for conditional directives like ``when:``.
-Test plugins do not use the standard configuration and documentation system described above.
+Test plugins do not use the standard configuration system described above, but since ansible-core 2.14 they can use plain documentation.
Since Ansible evaluates variables only when they are needed, test plugins should propagate the exceptions ``jinja2.exceptions.UndefinedError`` and ``AnsibleUndefinedVariable`` to ensure undefined variables are only fatal when necessary.
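+
+As an illustration, here is a minimal sketch of a test plugin file; the test name ``positive`` is hypothetical:
+
+.. code-block:: python
+
+    def positive(value):
+        # A trivial test, usable as ``x is positive`` in templates and ``when:``.
+        return float(value) > 0
+
+
+    class TestModule(object):
+        # Ansible looks for a TestModule class whose tests() method returns
+        # a mapping of test names to callables.
+        def tests(self):
+            return {'positive': positive}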
@@ -547,3 +551,5 @@ For example vars plugins, see the source code for the `vars plugins included wit
The development mailing list
:ref:`communication_irc`
How to join Ansible chat channels
+ :ref:`adjacent_yaml_doc`
+ Alternate YAML files as documentation
diff --git a/docs/docsite/rst/dev_guide/sidecar.rst b/docs/docsite/rst/dev_guide/sidecar.rst
new file mode 100644
index 00000000..ccf3aa75
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/sidecar.rst
@@ -0,0 +1,100 @@
+.. _adjacent_yaml_doc:
+
+*********************************
+Adjacent YAML documentation files
+*********************************
+
+.. contents::
+ :local:
+
+YAML documentation for plugins
+------------------------------
+For most Ansible plugins, the documentation is in the same file as the code. This approach does not work in cases where:
+
+ * Multiple plugins are defined in the same file, such as tests and filters.
+ * Plugins are written in a language other than Python (modules).
+
+These cases require plugins to provide documentation in an adjacent ``.py`` file. As of ansible-core 2.14, you can provide documentation as adjacent YAML files instead.
+The format of a YAML documentation file is nearly identical to its Python equivalent, except it is pure YAML.
+
+
+YAML format
+-----------
+In Python each section is a variable ``DOCUMENTATION = r""" ... """`` while in YAML it is a mapping key ``DOCUMENTATION: ...``.
+
+Here is a longer example that shows documentation as embedded in a Python file:
+
+.. code-block:: python
+
+ DOCUMENTATION = r'''
+ description: something
+ options:
+ option_name:
+ description: describe this config option
+ default: default value for this config option
+ env:
+ - name: NAME_OF_ENV_VAR
+ ini:
+ - section: section_of_ansible.cfg_where_this_config_option_is_defined
+ key: key_used_in_ansible.cfg
+ vars:
+ - name: name_of_ansible_var
+ - name: name_of_second_var
+ version_added: X.x
+ required: True/False
+ type: boolean/float/integer/list/none/path/pathlist/pathspec/string/tmppath
+ version_added: X.x
+ '''
+
+ EXAMPLES = r'''
+ # TODO: write examples
+ '''
+
+This example shows the same documentation in YAML format:
+
+.. code-block:: YAML
+
+ DOCUMENTATION:
+ description: something
+ options:
+ option_name:
+ description: describe this config option
+ default: default value for this config option
+ env:
+ - name: NAME_OF_ENV_VAR
+ ini:
+ - section: section_of_ansible.cfg_where_this_config_option_is_defined
+ key: key_used_in_ansible.cfg
+ vars:
+ - name: name_of_ansible_var
+ - name: name_of_second_var
+ version_added: X.x
+ required: True/False
+ type: boolean/float/integer/list/none/path/pathlist/pathspec/string/tmppath
+ version_added: X.x
+
+ EXAMPLES: # TODO: write examples
+
+As the examples above show, the Python variables already contain YAML. The main change when switching to YAML documentation is simply to move that YAML out of the variables.
+
+Any adjacent YAML documentation files must be in the same directory as the plugin or module that they document. This means the documentation is available in any directory that contains the plugins or modules.
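+
+For example, a sidecar layout for a filter plugin might look like the following sketch (the file names are hypothetical; the YAML file shares the plugin file's base name):
+
+.. code-block:: text
+
+    plugins/filter/
+    ├── to_shout.py     # plugin code, possibly defining several filters
+    └── to_shout.yml    # adjacent YAML documentation for the plugin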
+
+
+Supported plugin types
+----------------------
+YAML documentation is mainly intended for filters, tests and modules. While it is possible to use it with other plugin types, Ansible recommends keeping the documentation in the same file as the code in most cases.
+
+.. seealso::
+
+ :ref:`list_of_collections`
+ Browse existing collections, modules, and plugins
+ :ref:`developing_api`
+ Learn about the Python API for task execution
+ :ref:`developing_inventory`
+ Learn about how to develop dynamic inventory sources
+ :ref:`developing_modules_general`
+ Learn about how to write Ansible modules
+ `Mailing List <https://groups.google.com/group/ansible-devel>`_
+ The development mailing list
+ :ref:`communication_irc`
+ How to join Ansible chat channels
diff --git a/docs/docsite/rst/dev_guide/testing.rst b/docs/docsite/rst/dev_guide/testing.rst
index a587a746..6b4716dd 100644
--- a/docs/docsite/rst/dev_guide/testing.rst
+++ b/docs/docsite/rst/dev_guide/testing.rst
@@ -26,9 +26,6 @@ Types of tests
At a high level we have the following classifications of tests:
-:compile:
- * :ref:`testing_compile`
- * Test python code against a variety of Python versions.
:sanity:
* :ref:`testing_sanity`
* Sanity tests are made up of scripts and tools used to perform static code analysis.
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/compile.rst b/docs/docsite/rst/dev_guide/testing/sanity/compile.rst
index 222f94e4..40367218 100644
--- a/docs/docsite/rst/dev_guide/testing/sanity/compile.rst
+++ b/docs/docsite/rst/dev_guide/testing/sanity/compile.rst
@@ -1,4 +1,32 @@
+.. _testing_compile:
+
compile
=======
-See :ref:`testing_compile` for more information.
+All Python source files must successfully compile using all supported Python versions.
+
+.. note::
+
+ The list of supported Python versions is dependent on the version of ``ansible-core`` that you are using.
+ Make sure you consult the version of the documentation which matches your ``ansible-core`` version.
+
+Controller code, including plugins in Ansible Collections, must support the following Python versions:
+
+- 3.11
+- 3.10
+- 3.9
+
+Code which runs on targets (``modules`` and ``module_utils``) must support all controller supported Python versions,
+as well as the additional Python versions supported only on targets:
+
+- 3.8
+- 3.7
+- 3.6
+- 3.5
+- 2.7
+
+.. note::
+
+ Ansible Collections can be
+ `configured <https://github.com/ansible/ansible/blob/devel/test/lib/ansible_test/config/config.yml>`_
+ to support a subset of the target-only Python versions.
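+
+To run just this test, for example in the default test container:
+
+.. code-block:: shell
+
+    ansible-test sanity --test compile --docker default -v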
diff --git a/docs/docsite/rst/dev_guide/testing_compile.rst b/docs/docsite/rst/dev_guide/testing_compile.rst
index a4119f55..2f258c8b 100644
--- a/docs/docsite/rst/dev_guide/testing_compile.rst
+++ b/docs/docsite/rst/dev_guide/testing_compile.rst
@@ -1,76 +1,8 @@
:orphan:
-.. _testing_compile:
*************
Compile Tests
*************
-.. contents:: Topics
-
-Overview
-========
-
-Compile tests check source files for valid syntax on all supported python versions:
-
-- 2.4 (Ansible 2.3 only)
-- 2.6
-- 2.7
-- 3.5
-- 3.6
-- 3.7
-- 3.8
-- 3.9
-
-NOTE: In Ansible 2.4 and earlier the compile test was provided by a dedicated sub-command ``ansible-test compile`` instead of a sanity test using ``ansible-test sanity --test compile``.
-
-Running compile tests locally
-=============================
-
-Compile tests can be run across the whole code base by doing:
-
-.. code:: shell
-
- cd /path/to/ansible/source
- source hacking/env-setup
- ansible-test sanity --test compile
-
-Against a single file by doing:
-
-.. code:: shell
-
- ansible-test sanity --test compile lineinfile
-
-Or against a specific Python version by doing:
-
-.. code:: shell
-
- ansible-test sanity --test compile --python 2.7 lineinfile
-
-For advanced usage see the help:
-
-.. code:: shell
-
- ansible-test sanity --help
-
-
-Installing dependencies
-=======================
-
-``ansible-test`` has a number of dependencies , for ``compile`` tests we suggest running the tests with ``--local``, which is the default
-
-The dependencies can be installed using the ``--requirements`` argument. For example:
-
-.. code:: shell
-
- ansible-test sanity --test compile --requirements lineinfile
-
-
-
-The full list of requirements can be found at `test/lib/ansible_test/_data/requirements <https://github.com/ansible/ansible/tree/devel/test/lib/ansible_test/_data/requirements>`_. Requirements files are named after their respective commands. See also the `constraints <https://github.com/ansible/ansible/blob/devel/test/lib/ansible_test/_data/requirements/constraints.txt>`_ applicable to all commands.
-
-
-Extending compile tests
-=======================
-
-If you believe changes are needed to the compile tests please add a comment on the `Testing Working Group Agenda <https://github.com/ansible/community/blob/main/meetings/README.md>`_ so it can be discussed.
+This page has moved to :ref:`testing_compile`.
diff --git a/docs/docsite/rst/dev_guide/testing_documentation.rst b/docs/docsite/rst/dev_guide/testing_documentation.rst
index cfd309d6..280e2c03 100644
--- a/docs/docsite/rst/dev_guide/testing_documentation.rst
+++ b/docs/docsite/rst/dev_guide/testing_documentation.rst
@@ -1,14 +1,18 @@
:orphan:
.. _testing_module_documentation:
+.. _testing_plugin_documentation:
****************************
-Testing module documentation
+Testing plugin documentation
****************************
-Before you submit a module for inclusion in the main Ansible repo, you must test your module documentation for correct HTML rendering and to ensure that the argspec matches the documentation in your Python file. The community pages offer more information on :ref:`testing reStructuredText documentation <testing_documentation_locally>`.
+A quick test while developing is to use ``ansible-doc -t <plugin_type> <name>`` to see if it renders; you might need to add ``-M /path/to/module`` if the module is not somewhere Ansible expects to find it.
-To check the HTML output of your module documentation:
+Before you submit a plugin for inclusion in Ansible, you must test your documentation for correct HTML rendering and, for modules, ensure that the argspec matches the documentation in your Python file.
+The community pages offer more information on :ref:`testing reStructuredText documentation <testing_documentation_locally>`.
+
+For example, to check the HTML output of your module documentation:
#. Ensure working :ref:`development environment <environment_setup>`.
#. Install required Python packages (drop '--user' in venv/virtualenv):
@@ -18,7 +22,7 @@ To check the HTML output of your module documentation:
pip install --user -r requirements.txt
pip install --user -r docs/docsite/requirements.txt
-#. Ensure your module is in the correct directory: ``lib/ansible/modules/$CATEGORY/mymodule.py``.
+#. Ensure your module is in the correct directory: ``lib/ansible/modules/mymodule.py`` or in a configured path.
#. Build HTML from your module documentation: ``MODULES=mymodule make webdocs``.
#. To build the HTML documentation for multiple modules, use a comma-separated list of module names: ``MODULES=mymodule,mymodule2 make webdocs``.
#. View the HTML page at ``file:///path/to/docs/docsite/_build/html/modules/mymodule_module.html``.
@@ -36,3 +40,5 @@ To ensure that your module documentation matches your ``argument_spec``:
.. code-block:: bash
ansible-test sanity --test validate-modules mymodule
+
+For other plugin types the steps are similar; just adjust the names and paths for the specific plugin type.
diff --git a/docs/docsite/rst/dev_guide/testing_integration.rst b/docs/docsite/rst/dev_guide/testing_integration.rst
index b3358261..915281d0 100644
--- a/docs/docsite/rst/dev_guide/testing_integration.rst
+++ b/docs/docsite/rst/dev_guide/testing_integration.rst
@@ -187,13 +187,6 @@ Container images are updated regularly. To see the current list of container ima
The list is under the **target docker images and supported python version** heading.
-Legacy Cloud Tests
-==================
-
-Some of the cloud tests run as normal integration tests, and others run as legacy tests; see the
-:ref:`testing_integration_legacy` page for more information.
-
-
Other configuration for Cloud Tests
===================================
diff --git a/docs/docsite/rst/dev_guide/testing_integration_legacy.rst b/docs/docsite/rst/dev_guide/testing_integration_legacy.rst
deleted file mode 100644
index 115fde6c..00000000
--- a/docs/docsite/rst/dev_guide/testing_integration_legacy.rst
+++ /dev/null
@@ -1,114 +0,0 @@
-:orphan:
-
-.. _testing_integration_legacy:
-
-*******************************************
-Testing using the Legacy Integration system
-*******************************************
-
-.. contents:: Topics
-
-This page details how to run the integration tests that haven't been ported to the new ``ansible-test`` framework.
-
-The following areas are still tested using the legacy ``make tests`` command:
-
-* amazon (some)
-* azure
-* cloudflare
-* cloudscale
-* cloudstack
-* consul
-* exoscale
-* gce
-* jenkins
-* rackspace
-
-Over time the above list will be reduced as tests are ported to the ``ansible-test`` framework.
-
-
-Running Cloud Tests
-====================
-
-Cloud tests exercise capabilities of cloud modules (for example, ec2_key). These are
-not 'tests run in the cloud' so much as tests that use the cloud modules
-and are organized by cloud provider.
-
-Some AWS tests may use environment variables. It is recommended to either unset any AWS environment variables( such as ``AWS_DEFAULT_PROFILE``, ``AWS_SECRET_ACCESS_KEY``, and so on) or be sure that the environment variables match the credentials provided in ``credentials.yml`` to ensure the tests run with consistency to their full capability on the expected account. See `AWS CLI docs <https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html>`_ for information on creating a profile.
-
-Subsets of tests may be run by ``#commenting`` out unnecessary roles in the appropriate playbook, such as ``test/integration/amazon.yml``.
-
-In order to run cloud tests, you must provide access credentials in a file
-named ``credentials.yml``. A sample credentials file named
-``credentials.template`` is available for syntax help.
-
-Provide cloud credentials:
-
-.. code-block:: shell-session
-
- cp credentials.template credentials.yml
- ${EDITOR:-vi} credentials.yml
-
-
-Other configuration
-===================
-
-In order to run some tests, you must provide access credentials in a file named
-``credentials.yml``. A sample credentials file named ``credentials.template`` is available
-for syntax help.
-
-IAM policies for AWS
-====================
-
-In order to run the tests in an AWS account ansible needs fairly wide ranging powers which
-can be provided to a dedicated user or temporary credentials using a specific policy
-configured in the AWS account.
-
-testing-iam-policy.json.j2
---------------------------
-
-The testing-iam-policy.json.j2 file contains a policy which can be given to the user
-running the tests to give close to minimum rights required to run the tests. Please note
-that this does not fully restrict the user; The user has wide privileges for viewing
-account definitions and is also able to manage some resources that are not related to
-testing (for example, AWS lambdas with different names) primarily due to the limitations of the
-Amazon ARN notation. At the very least the policy limits the user to one region, however
-tests should not be run in a primary production account in any case.
-
-Other Definitions required
---------------------------
-
-Apart from installing the policy and giving it to the user identity running
-the tests, a lambda role `ansible_integration_tests` has to be created which
-has lambda basic execution privileges.
-
-
-Running Tests
-=============
-
-The tests are invoked through the ``Makefile``.
-
-If you haven't already got Ansible available use the local checkout by doing:
-
-.. code-block:: shell-session
-
- source hacking/env-setup
-
-Run the tests by doing:
-
-.. code-block:: shell-session
-
- cd test/integration/
- # TARGET is the name of the test from the list at the top of this page
- #make TARGET
- # for example
- make amazon
- # To run all cloud tests you can do:
- make cloud
-
-.. warning:: Possible cost of running cloud tests
-
- Running cloud integration tests will create and destroy cloud
- resources. Running these tests may result in additional fees associated with
- your cloud account. Care is taken to ensure that created resources are
- removed. However, it is advisable to inspect your AWS console to ensure no
- unexpected resources are running.
diff --git a/docs/docsite/rst/dev_guide/testing_running_locally.rst b/docs/docsite/rst/dev_guide/testing_running_locally.rst
index dcf7e6d9..0d03189b 100644
--- a/docs/docsite/rst/dev_guide/testing_running_locally.rst
+++ b/docs/docsite/rst/dev_guide/testing_running_locally.rst
@@ -2,44 +2,339 @@
.. _testing_running_locally:
-***************
-Testing Ansible
-***************
+*******************************
+Testing Ansible and Collections
+*******************************
-This document describes how to:
-
-* Run tests locally using ``ansible-test``
-* Extend
+This document describes how to run tests using ``ansible-test``.
.. contents::
:local:
-Requirements
-============
+Setup
+=====
-There are no special requirements for running ``ansible-test`` on Python 2.7 or later.
-The ``argparse`` package is required for Python 2.6.
-The requirements for each ``ansible-test`` command are covered later.
+Before running ``ansible-test``, set up your environment for :ref:`testing_an_ansible_collection` or
+:ref:`testing_ansible_core`, depending on which scenario applies to you.
+.. warning::
-Test Environments
-=================
+ If you use ``git`` for version control, make sure the files you are working with are not ignored by ``git``.
+ If they are, ``ansible-test`` will ignore them as well.
+
+.. _testing_an_ansible_collection:
+
+Testing an Ansible Collection
+-----------------------------
+
+If you are testing an Ansible Collection, you need a copy of the collection, preferably a git clone.
+For example, to work with the ``community.windows`` collection, follow these steps:
+
+1. Clone the collection you want to test into a valid collection root:
+
+ .. code-block:: shell
+
+ git clone https://github.com/ansible-collections/community.windows ~/dev/ansible_collections/community/windows
+
+ .. important::
+
+ The path must end with ``/ansible_collections/{collection_namespace}/{collection_name}`` where
+ ``{collection_namespace}`` is the namespace of the collection and ``{collection_name}`` is the collection name.
+
+2. Clone any collections on which the collection depends:
+
+ .. code-block:: shell
+
+ git clone https://github.com/ansible-collections/ansible.windows ~/dev/ansible_collections/ansible/windows
+
+ .. important::
+
+ If your collection has any dependencies on other collections, they must be in the same collection root, since
+ ``ansible-test`` will not use your configured collection roots (or other Ansible configuration).
+
+ .. note::
+
+ See the collection's ``galaxy.yml`` for a list of possible dependencies.
+
+3. Switch to the directory where the collection to test resides:
+
+ .. code-block:: shell
+
+ cd ~/dev/ansible_collections/community/windows
+
+.. _testing_ansible_core:
+
+Testing ``ansible-core``
+------------------------
+
+If you are testing ``ansible-core`` itself, you need a copy of the ``ansible-core`` source code, preferably a git clone.
+Having an installed copy of ``ansible-core`` is not sufficient or required.
+For example, to work with the ``ansible-core`` source cloned from GitHub, follow these steps:
+
+1. Clone the ``ansible-core`` repository:
+
+ .. code-block:: shell
+
+ git clone https://github.com/ansible/ansible ~/dev/ansible
+
+2. Switch to the directory where the ``ansible-core`` source resides:
+
+ .. code-block:: shell
+
+ cd ~/dev/ansible
+
+3. Add ``ansible-core`` programs to your ``PATH``:
+
+ .. code-block:: shell
+
+ source hacking/env-setup
+
+ .. note::
+
+ You can skip this step if you only need to run ``ansible-test``, and not other ``ansible-core`` programs.
+ In that case, simply run ``bin/ansible-test`` from the root of the ``ansible-core`` source.
+
+ .. caution::
+
+ If you have an installed version of ``ansible-core`` and are trying to run ``ansible-test`` from your ``PATH``,
+ make sure the program found by your shell is the one from the ``ansible-core`` source:
+
+ .. code-block:: shell
+
+ which ansible-test
+
+Commands
+========
+
+The most commonly used test commands are:
+
+* ``ansible-test sanity`` - Run sanity tests (mostly linters and static analysis).
+* ``ansible-test integration`` - Run integration tests.
+* ``ansible-test units`` - Run unit tests.
+
+Run ``ansible-test --help`` to see a complete list of available commands.
+
+.. note::
+
+ For detailed help on a specific command, add the ``--help`` option after the command.
+
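+For example, the following commands each run one test type in the default container; the ``ping`` integration target is illustrative only:
+
+.. code-block:: shell
+
+    ansible-test sanity --docker default
+    ansible-test units --docker default
+    ansible-test integration --docker default ping
+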
+Environments
+============
Most ``ansible-test`` commands support running in one or more isolated test environments to simplify testing.
+Containers
+----------
+
+Containers are recommended for running sanity, unit and integration tests, since they provide consistent environments.
+Unit tests will be run with network isolation, which avoids unintentional dependencies on network resources.
+
+The ``--docker`` option runs tests in a container using either Docker or Podman.
+
+.. note::
+
+ If both Docker and Podman are installed, Docker will be used.
+ To override this, set the environment variable ``ANSIBLE_TEST_PREFER_PODMAN`` to any non-empty value.
+
+Choosing a container
+^^^^^^^^^^^^^^^^^^^^
+
+Without an additional argument, the ``--docker`` option uses the ``default`` container.
+To use another container, specify it immediately after the ``--docker`` option.
+
+.. note::
+
+ The ``default`` container is recommended for all sanity and unit tests.
+
+To see the list of supported containers, use the ``--help`` option with the ``ansible-test`` command you want to use.
+
+.. note::
+
+ The list of available containers is dependent on the ``ansible-test`` command you are using.
+
+You can also specify your own container.
+When doing so, you will need to indicate the Python version in the container with the ``--python`` option.
+
+Custom containers
+"""""""""""""""""
+
+When building custom containers, keep in mind the following requirements:
+
+* The ``USER`` should be ``root``.
+* Use an ``init`` process, such as ``systemd``.
+* Include ``sshd`` and accept connections on the default port of ``22``.
+* Include a POSIX compatible ``sh`` shell which can be found on ``PATH``.
+* Include a ``sleep`` utility which runs as a subprocess.
+* Include a supported version of Python.
+* Avoid using the ``VOLUME`` statement.
+
+Docker and SELinux
+^^^^^^^^^^^^^^^^^^
+
+Using Docker on a host with SELinux may require setting the system in permissive mode.
+Consider using Podman instead.
+
+Docker Desktop with WSL2
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+These instructions explain how to use ``ansible-test`` with WSL2 and Docker Desktop *without* ``systemd`` support.
-Remote
-------
+.. note::
-The ``--remote`` option runs tests in a cloud hosted environment.
-An API key is required to use this feature.
+ If your WSL2 environment includes ``systemd`` support, these steps are not required.
- Recommended for integration tests.
+.. _configuration_requirements:
-See the `list of supported platforms and versions <https://github.com/ansible/ansible/blob/devel/test/lib/ansible_test/_data/completion/remote.txt>`_ for additional details.
+Configuration requirements
+""""""""""""""""""""""""""
-Environment Variables
----------------------
+1. Open Docker Desktop and go to the **Settings** screen.
+2. On the **General** tab:
+
+ a. Uncheck the **Start Docker Desktop when you log in** checkbox.
+ b. Check the **Use the WSL 2 based engine** checkbox.
+
+3. On the **Resources** tab under the **WSL Integration** section:
+
+ a. Enable distros you want to use under the **Enable integration with additional distros** section.
+
+4. Click **Apply and restart** if changes were made.
+
+.. _setup_instructions:
+
+Setup instructions
+""""""""""""""""""
+
+.. note::
+
+ If all WSL instances have been stopped, these changes will need to be re-applied.
+
+1. Verify Docker Desktop is properly configured (see :ref:`configuration_requirements`).
+2. Quit Docker Desktop if it is running:
+
+ a. Right click the **Docker Desktop** taskbar icon.
+ b. Click the **Quit Docker Desktop** option.
+
+3. Stop any running WSL instances with the command:
+
+ .. code-block:: shell
+
+ wsl --shutdown
+
+4. Verify all WSL instances have stopped with the command:
+
+ .. code-block:: shell
+
+ wsl -l -v
+
+5. Start a WSL instance and perform the following steps as ``root``:
+
+ a. Verify the ``systemd`` subsystem is not registered:
+
+ a. Check for the ``systemd`` cgroup hierarchy with the following command:
+
+ .. code-block:: shell
+
+ grep systemd /proc/self/cgroup
+
+ b. If any matches are found, re-check the :ref:`configuration_requirements` and follow the
+ :ref:`setup_instructions` again.
+
+ b. Mount the ``systemd`` cgroup hierarchy with the following commands:
+
+ .. code-block:: shell
+
+ mkdir /sys/fs/cgroup/systemd
+ mount cgroup -t cgroup /sys/fs/cgroup/systemd -o none,name=systemd,xattr
+
+6. Start Docker Desktop.
+
+You should now be able to use ``ansible-test`` with the ``--docker`` option.
+
+.. _linux_cgroup_configuration:
+
+Linux cgroup configuration
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. note::
+
+ These changes will need to be re-applied each time the container host is booted.
+
+For certain container hosts and container combinations, additional setup on the container host may be required.
+In these situations ``ansible-test`` will report an error and provide additional instructions to run as ``root``:
+
+.. code-block:: shell
+
+ mkdir /sys/fs/cgroup/systemd
+ mount cgroup -t cgroup /sys/fs/cgroup/systemd -o none,name=systemd,xattr
+
+If you are using rootless Podman, an additional command must be run, also as ``root``.
+Make sure to substitute your user and group for ``{user}`` and ``{group}`` respectively:
+
+.. code-block:: shell
+
+ chown -R {user}:{group} /sys/fs/cgroup/systemd
+
+Podman
+""""""
+
+When using Podman, you may need to stop existing Podman processes after following the :ref:`linux_cgroup_configuration`
+instructions. Otherwise Podman may be unable to see the new mount point.
+
+You can check to see if Podman is running by looking for ``podman`` and ``catatonit`` processes.
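+
+For example, one way to check (assuming a procps-style ``ps``):
+
+.. code-block:: shell
+
+    ps -ef | grep -E '[p]odman|[c]atatonit'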
+
+Remote virtual machines
+-----------------------
+
+Remote virtual machines are recommended for running integration tests not suitable for execution in containers.
+
+The ``--remote`` option runs tests in a cloud hosted ephemeral virtual machine.
+
+.. note::
+
+ An API key is required to use this feature, unless running under an approved Azure Pipelines organization.
+
+To see the list of supported systems, use the ``--help`` option with the ``ansible-test`` command you want to use.
+
+.. note::
+
+ The list of available systems is dependent on the ``ansible-test`` command you are using.
+
+Python virtual environments
+---------------------------
+
+Python virtual environments provide a simple way to achieve isolation from the system and user Python environments.
+They are recommended for unit and integration tests when the ``--docker`` and ``--remote`` options cannot be used.
+
+The ``--venv`` option runs tests in a virtual environment managed by ``ansible-test``.
+Requirements are automatically installed before tests are run.
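+
+For example, to run unit tests in an ``ansible-test`` managed virtual environment with a specific Python version:
+
+.. code-block:: shell
+
+    ansible-test units --venv --python 3.10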
+
+Composite environment arguments
+-------------------------------
+
+The environment arguments covered in this document are sufficient for most use cases.
+However, some scenarios may require the additional flexibility offered by composite environment arguments.
+
+The ``--controller`` and ``--target`` options are alternatives to the ``--docker``, ``--remote`` and ``--venv`` options.
+
+.. note::
+
+ When using the ``shell`` command, the ``--target`` option is replaced by three platform-specific options.
+
+Add the ``--help`` option to your ``ansible-test`` command to learn more about the composite environment arguments.
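+
+As a purely illustrative sketch (the platforms accepted by ``--controller`` and ``--target`` depend on your ``ansible-test`` version; check ``--help`` for the real list):
+
+.. code-block:: shell
+
+    ansible-test integration --controller docker:default --target docker:default ping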
+
+Additional Requirements
+=======================
+
+Some ``ansible-test`` commands have additional requirements.
+You can use the ``--requirements`` option to automatically install them.
+
+.. note::
+
+ When using a test environment managed by ``ansible-test``, the ``--requirements`` option is usually unnecessary.
+
+Environment variables
+=====================
When using environment variables to manipulate tests, there are some limitations to keep in mind. Environment variables are:
@@ -49,18 +344,17 @@ When using environment variables to manipulate tests there some limitations to k
Example: ``ANSIBLE_KEEP_REMOTE_FILES=1`` can be set when running ``ansible-test integration --venv``. However, using the ``--docker`` option would
require running ``ansible-test shell`` to gain access to the Docker environment. Once at the shell prompt, the environment variable could be set
and the tests executed. This is useful for debugging tests inside a container by following the
- :ref:`Debugging AnsibleModule-based modules <debugging_modules>` instructions.
+ :ref:`debugging_modules` instructions.
-Interactive Shell
+Interactive shell
=================
Use the ``ansible-test shell`` command to get an interactive shell in the same environment used to run tests. Examples:
* ``ansible-test shell --docker`` - Open a shell in the default docker container.
-* ``ansible-test shell --venv --python 3.6`` - Open a shell in a Python 3.6 virtual environment.
+* ``ansible-test shell --venv --python 3.10`` - Open a shell in a Python 3.10 virtual environment.
-
-Code Coverage
+Code coverage
=============
Code coverage reports make it easy to identify untested code for which more tests should
@@ -72,22 +366,17 @@ aren't using the ``--venv`` or ``--docker`` options which create an isolated pyt
environment then you may have to use the ``--requirements`` option to ensure that the
correct version of the coverage module is installed:
-.. code-block:: shell-session
+.. code-block:: shell
ansible-test coverage erase
ansible-test units --coverage apt
ansible-test integration --coverage aws_lambda
ansible-test coverage html
-
Reports can be generated in several different formats:
* ``ansible-test coverage report`` - Console report.
* ``ansible-test coverage html`` - HTML report.
* ``ansible-test coverage xml`` - XML report.
-To clear data between test runs, use the ``ansible-test coverage erase`` command. For a full list of features see the online help:
-
-.. code-block:: shell-session
-
- ansible-test coverage --help
+To clear data between test runs, use the ``ansible-test coverage erase`` command.
diff --git a/docs/docsite/rst/getting_started/get_started_inventory.rst b/docs/docsite/rst/getting_started/get_started_inventory.rst
index 1e21f046..8df73302 100644
--- a/docs/docsite/rst/getting_started/get_started_inventory.rst
+++ b/docs/docsite/rst/getting_started/get_started_inventory.rst
@@ -14,7 +14,7 @@ Now let's create an inventory file that you can add to source control for flexib
.. note::
Inventory files can be in ``INI`` or ``YAML`` format.
- For demonstration purposes this section uses ``YAML`` format only.
+ For demonstration purposes, this section uses ``YAML`` format only.
Complete the following steps:
diff --git a/docs/docsite/rst/getting_started/index.rst b/docs/docsite/rst/getting_started/index.rst
index a1419393..818c7fbe 100644
--- a/docs/docsite/rst/getting_started/index.rst
+++ b/docs/docsite/rst/getting_started/index.rst
@@ -84,7 +84,7 @@ Continue by :ref:`learning how to build an inventory<get_started_inventory>`.
`Ansible Demos <https://github.com/ansible/product-demos>`_
Demonstrations of different Ansible usecases
- `RHEL Labs <https://katacoda.com/rhel-labs>`_
+ `Ansible Labs <https://www.ansible.com/products/ansible-training>`_
Labs to provide further knowledge on different topics
`Mailing List <https://groups.google.com/group/ansible-project>`_
Questions? Help? Ideas? Stop by the list on Google Groups
diff --git a/docs/docsite/rst/installation_guide/installation_distros.rst b/docs/docsite/rst/installation_guide/installation_distros.rst
index ce07c6a7..f7200416 100644
--- a/docs/docsite/rst/installation_guide/installation_distros.rst
+++ b/docs/docsite/rst/installation_guide/installation_distros.rst
@@ -104,7 +104,7 @@ You cannot use a Windows system for the Ansible control node. See :ref:`windows_
.. seealso::
- `Installing Ansible on ARch Linux <https://wiki.archlinux.org/title/Ansible#Installation>`_
+ `Installing Ansible on Arch Linux <https://wiki.archlinux.org/title/Ansible#Installation>`_
Distro-specific installation on Arch Linux
`Installing Ansible on Clear Linux <https://clearlinux.org/software/bundle/ansible>`_
Distro-specific installation on Clear Linux
diff --git a/docs/docsite/rst/inventory_guide/intro_dynamic_inventory.rst b/docs/docsite/rst/inventory_guide/intro_dynamic_inventory.rst
index 1a6113a6..e0d9c204 100644
--- a/docs/docsite/rst/inventory_guide/intro_dynamic_inventory.rst
+++ b/docs/docsite/rst/inventory_guide/intro_dynamic_inventory.rst
@@ -208,7 +208,7 @@ Note that the OpenStack dynamic inventory script will cache results to avoid rep
Other inventory scripts
=======================
-In Ansible 2.10 and later, inventory scripts moved to their associated collections. Many are now in the `community.general scripts/inventory directory <https://github.com/ansible-collections/community.general/tree/main/scripts/inventory>`_. We recommend you use :ref:`inventory_plugins` instead.
+In Ansible 2.10 and later, inventory scripts moved to their associated collections. Many are now in the `ansible-community/contrib-scripts repository <https://github.com/ansible-community/contrib-scripts/tree/main/inventory>`_. We recommend you use :ref:`inventory_plugins` instead.
.. _using_multiple_sources:
diff --git a/docs/docsite/rst/inventory_guide/intro_inventory.rst b/docs/docsite/rst/inventory_guide/intro_inventory.rst
index 6bc052c4..3fd6782d 100644
--- a/docs/docsite/rst/inventory_guide/intro_inventory.rst
+++ b/docs/docsite/rst/inventory_guide/intro_inventory.rst
@@ -128,6 +128,7 @@ Grouping groups: parent/child group relationships
You can create parent/child relationships among groups. Parent groups are also known as nested groups or groups of groups. For example, if all your production hosts are already in groups such as ``atlanta_prod`` and ``denver_prod``, you can create a ``production`` group that includes those smaller groups. This approach reduces maintenance because you can add or remove hosts from the parent group by editing the child groups.
To create parent/child relationships for groups:
+
* in INI format, use the ``:children`` suffix
* in YAML format, use the ``children:`` entry
@@ -292,7 +293,9 @@ We document adding variables in the main inventory file for simplicity. However,
Assigning a variable to one machine: host variables
===================================================
-You can easily assign a variable to a single host, then use it later in playbooks. You can do this directly in your inventory file. In INI:
+You can easily assign a variable to a single host, then use it later in playbooks. You can do this directly in your inventory file.
+
+In INI:
.. code-block:: text
@@ -376,7 +379,9 @@ Consider using YAML format for inventory sources to avoid confusion on the actua
Assigning a variable to many machines: group variables
======================================================
-If all hosts in a group share a variable value, you can apply that variable to an entire group at once. In INI:
+If all hosts in a group share a variable value, you can apply that variable to an entire group at once.
+
+In INI:
.. code-block:: text
@@ -545,6 +550,7 @@ When using multiple inventory sources, keep in mind that any variable conflicts
to the rules described in :ref:`how_we_merge` and :ref:`ansible_variable_precedence`. You can control the merging order of variables in inventory sources to get the variable value you need.
When you pass multiple inventory sources at the command line, Ansible merges variables in the order you pass those parameters. If ``[all:vars]`` in staging inventory defines ``myvar = 1`` and production inventory defines ``myvar = 2``, then:
+
* Pass ``-i staging -i production`` to run the playbook with ``myvar = 2``.
* Pass ``-i production -i staging`` to run the playbook with ``myvar = 1``.
diff --git a/docs/docsite/rst/os_guide/windows_faq.rst b/docs/docsite/rst/os_guide/windows_faq.rst
index 97325723..f582d7db 100644
--- a/docs/docsite/rst/os_guide/windows_faq.rst
+++ b/docs/docsite/rst/os_guide/windows_faq.rst
@@ -57,7 +57,7 @@ can be run in the bash terminal:
.. code-block:: shell
sudo apt-get update
- sudo apt-get install python-pip git libffi-dev libssl-dev -y
+ sudo apt-get install python3-pip git libffi-dev libssl-dev -y
pip install --user ansible pywinrm
To run Ansible from source instead of a release on the WSL, simply uninstall the pip
diff --git a/docs/docsite/rst/playbook_guide/complex_data_manipulation.rst b/docs/docsite/rst/playbook_guide/complex_data_manipulation.rst
index f94fb88f..11ed3c38 100644
--- a/docs/docsite/rst/playbook_guide/complex_data_manipulation.rst
+++ b/docs/docsite/rst/playbook_guide/complex_data_manipulation.rst
@@ -295,7 +295,7 @@ An example on how to use facts to find a host's data that meets condition X:
vars:
uptime_of_host_most_recently_rebooted: "{{ansible_play_hosts_all | map('extract', hostvars, 'ansible_uptime_seconds') | sort | first}}"
-An example to show a host uptime in days/hours/minutes/seconds (assumes facts where gathered).
+An example to show a host uptime in days/hours/minutes/seconds (assumes facts were gathered).
.. code-block:: YAML+Jinja
diff --git a/docs/docsite/rst/playbook_guide/playbooks_blocks.rst b/docs/docsite/rst/playbook_guide/playbooks_blocks.rst
index a6281646..805045da 100644
--- a/docs/docsite/rst/playbook_guide/playbooks_blocks.rst
+++ b/docs/docsite/rst/playbook_guide/playbooks_blocks.rst
@@ -43,7 +43,7 @@ All tasks in a block inherit directives applied at the block level. Most of what
become_user: root
ignore_errors: true
-In the example above, the 'when' condition will be evaluated before Ansible runs each of the three tasks in the block. All three tasks also inherit the privilege escalation directives, running as the root user. Finally, ``ignore_errors: yes`` ensures that Ansible continues to execute the playbook even if some of the tasks fail.
+In the example above, the 'when' condition will be evaluated before Ansible runs each of the three tasks in the block. All three tasks also inherit the privilege escalation directives, running as the root user. Finally, ``ignore_errors: true`` ensures that Ansible continues to execute the playbook even if some of the tasks fail.
Names for blocks have been available since Ansible 2.3. We recommend using names in all tasks, within blocks or elsewhere, for better visibility into the tasks being executed when you run the playbook.
diff --git a/docs/docsite/rst/playbook_guide/playbooks_filters.rst b/docs/docsite/rst/playbook_guide/playbooks_filters.rst
index 6063c061..52fb861a 100644
--- a/docs/docsite/rst/playbook_guide/playbooks_filters.rst
+++ b/docs/docsite/rst/playbook_guide/playbooks_filters.rst
@@ -81,7 +81,7 @@ A convenient way of requiring a variable to be overridden is to give it an undef
.. code-block:: yaml+jinja
galaxy_url: "https://galaxy.ansible.com"
- galaxy_api_key: {{ undef(hint="You must specify your Galaxy API key") }}
+ galaxy_api_key: "{{ undef(hint='You must specify your Galaxy API key') }}"
Defining different values for true/false/null (ternary)
=======================================================
diff --git a/docs/docsite/rst/playbook_guide/playbooks_variables.rst b/docs/docsite/rst/playbook_guide/playbooks_variables.rst
index ad31c0ea..9482845d 100644
--- a/docs/docsite/rst/playbook_guide/playbooks_variables.rst
+++ b/docs/docsite/rst/playbook_guide/playbooks_variables.rst
@@ -331,14 +331,16 @@ When passing variables with ``--extra-vars``, you must escape quotes and other s
ansible-playbook arcade.yml --extra-vars '{"name":"Conan O'\\\''Brien"}'
ansible-playbook script.yml --extra-vars "{\"dialog\":\"He said \\\"I just can\'t get enough of those single and double-quotes"\!"\\\"\"}"
-If you have a lot of special characters, use a JSON or YAML file containing the variable definitions.
vars from a JSON or YAML file
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+If you have a lot of special characters, use a JSON or YAML file containing the variable definitions. Prepend both JSON and YAML filenames with ``@``.
+
.. code-block:: text
ansible-playbook release.yml --extra-vars "@some_file.json"
+ ansible-playbook release.yml --extra-vars "@some_file.yaml"
.. _ansible_variable_precedence:
diff --git a/docs/docsite/rst/plugins/callback.rst b/docs/docsite/rst/plugins/callback.rst
index f86469af..98e4beb7 100644
--- a/docs/docsite/rst/plugins/callback.rst
+++ b/docs/docsite/rst/plugins/callback.rst
@@ -36,7 +36,7 @@ Most callbacks shipped with Ansible are disabled by default and need to be enabl
Setting a callback plugin for ``ansible-playbook``
--------------------------------------------------
-You can only have one plugin be the main manager of your console output. If you want to replace the default, you should define CALLBACK_TYPE = stdout in the subclass and then configure the stdout plugin in :ref:`ansible.cfg <ansible_configuration_settings>`. For example:
+You can only have one plugin be the main manager of your console output. If you want to replace the default, you should define ``CALLBACK_TYPE = stdout`` in the subclass and then configure the stdout plugin in :ref:`ansible.cfg <ansible_configuration_settings>`. For example:
.. code-block:: ini
@@ -67,6 +67,25 @@ You can also set this as an environment variable:
export ANSIBLE_LOAD_CALLBACK_PLUGINS=1
+.. _callback_plugin_types:
+
+Types of callback plugins
+-------------------------
+
+There are three types of callback plugins:
+
+:stdout callback plugins:
+
+ These plugins handle the main console output. Only one of these can be active.
+
+:aggregate callback plugins:
+
+ Aggregate callbacks can add additional console output next to a stdout callback. This can be aggregate information at the end of a playbook run, additional per-task output, or anything else.
+
+:notification callback plugins:
+
+ Notification callbacks inform other applications, services, or systems. This can be anything from logging to databases, informing on errors in Instant Messaging applications, or sending emails when a server is unreachable.
+
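+For example, a minimal sketch of an aggregate callback plugin (the plugin name is hypothetical):
+
+.. code-block:: python
+
+    from ansible.plugins.callback import CallbackBase
+
+
+    class CallbackModule(CallbackBase):
+        CALLBACK_VERSION = 2.0
+        CALLBACK_TYPE = 'aggregate'
+        CALLBACK_NAME = 'summary_sketch'
+
+        def v2_playbook_on_stats(self, stats):
+            # Called once at the end of the run; add aggregate output here.
+            self._display.display('playbook finished')
+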
.. _callback_plugin_list:
Plugin list
diff --git a/docs/docsite/rst/porting_guides/porting_guide_6.rst b/docs/docsite/rst/porting_guides/porting_guide_6.rst
index 87756b2e..a111451f 100644
--- a/docs/docsite/rst/porting_guides/porting_guide_6.rst
+++ b/docs/docsite/rst/porting_guides/porting_guide_6.rst
@@ -99,6 +99,58 @@ Networking
No notable changes
+Porting Guide for v6.7.0
+========================
+
+Known Issues
+------------
+
+community.routeros
+~~~~~~~~~~~~~~~~~~
+
+- api_modify - when limits for entries in ``queue tree`` are defined as human readable (for example ``25M``), the configuration will be correctly set in ROS, but the module will indicate the item is changed on every run even when there was no change done. This is caused by the ROS API, which returns the number in bytes, for example ``25000000`` (which is inconsistent with the CLI behavior). To mitigate that, define the limits in bytes (they will still appear as human readable in the ROS CLI) (https://github.com/ansible-collections/community.routeros/pull/131).
+- api_modify, api_info - ``routing ospf area``, ``routing ospf area range``, ``routing ospf instance``, ``routing ospf interface-template`` paths are not fully implemented for ROS6 due to the significant changes between ROS6 and ROS7 (https://github.com/ansible-collections/community.routeros/pull/131).
+
+Major Changes
+-------------
+
+cisco.meraki
+~~~~~~~~~~~~
+
+- meraki_mr_l7_firewall - New module
+- meraki_webhook_payload_template - New module
+
+community.zabbix
+~~~~~~~~~~~~~~~~
+
+- all modules are opting away from zabbix-api and using httpapi ansible.netcommon plugin. We will support zabbix-api for backwards compatibility until next major release. See our README.md for more information about how to migrate
+- zabbix_agent and zabbix_proxy roles are opting away from zabbix-api and use httpapi ansible.netcommon plugin. We will support zabbix-api for backwards compatibility until next major release. See our README.md for more information about how to migrate
+
+containers.podman
+~~~~~~~~~~~~~~~~~
+
+- New become plugin - podman_unshare
+- Podman generate systemd module
+
+fortinet.fortimanager
+~~~~~~~~~~~~~~~~~~~~~
+
+- Fix compatibility issue for ansible 2.9.x and ansible-base 2.10.x.
+- support Ansible changelogs.
+
+fortinet.fortios
+~~~~~~~~~~~~~~~~
+
+- Support FortiOS v7.0.6, v7.0.7, v7.0.8, v7.2.1, v7.2.2.
+
+Deprecated Features
+-------------------
+
+community.general
+~~~~~~~~~~~~~~~~~
+
+- Please note that some tools, like the VScode plugin (https://github.com/ansible/vscode-ansible/issues/573), or ``ansible-doc --list --type module``, suggest replacing the correct FQCNs for modules and actions in community.general with internal names that have more than three components. For example, ``community.general.ufw`` is suggested to be replaced by ``community.general.system.ufw``. While these longer names do work, they are considered **internal names** by the collection and are subject to change and removal at any time. They **will** be removed in community.general 6.0.0 and result in deprecation messages. Avoid using these internal names, and use general three-component FQCNs (``community.general.<name_of_module>``) instead (https://github.com/ansible-collections/community.general/pull/5373).
+
Porting Guide for v6.6.0
========================
@@ -131,6 +183,12 @@ community.general
- newrelic_deployment - removed New Relic v1 API, added support for v2 API (https://github.com/ansible-collections/community.general/pull/5341).
+fortinet.fortimanager
+~~~~~~~~~~~~~~~~~~~~~
+
+- Many fixes for Ansible sanity test warnings & errors.
+- Support FortiManager Schema 7.2.0, 98 new modules
+
Deprecated Features
-------------------
diff --git a/docs/docsite/rst/porting_guides/porting_guide_7.rst b/docs/docsite/rst/porting_guides/porting_guide_7.rst
index 5c3d6fae..737cf3e3 100644
--- a/docs/docsite/rst/porting_guides/porting_guide_7.rst
+++ b/docs/docsite/rst/porting_guides/porting_guide_7.rst
@@ -92,6 +92,57 @@ Networking
No notable changes
+Porting Guide for v7.1.0
+========================
+
+Added Collections
+-----------------
+
+- grafana.grafana (version 1.1.0)
+
+Known Issues
+------------
+
+community.routeros
+~~~~~~~~~~~~~~~~~~
+
+- api_modify - when limits for entries in ``queue tree`` are defined as human readable (for example ``25M``), the configuration will be correctly set in ROS, but the module will indicate the item is changed on every run even when there was no change done. This is caused by the ROS API, which returns the number in bytes, for example ``25000000`` (which is inconsistent with the CLI behavior). To mitigate that, define the limits in bytes (they will still appear as human readable in the ROS CLI) (https://github.com/ansible-collections/community.routeros/pull/131).
+- api_modify, api_info - ``routing ospf area``, ``routing ospf area range``, ``routing ospf instance``, ``routing ospf interface-template`` paths are not fully implemented for ROS6 due to the significant changes between ROS6 and ROS7 (https://github.com/ansible-collections/community.routeros/pull/131).
+
+Major Changes
+-------------
+
+cisco.meraki
+~~~~~~~~~~~~
+
+- meraki_mr_l7_firewall - New module
+- meraki_webhook_payload_template - New module
+
+community.zabbix
+~~~~~~~~~~~~~~~~
+
+- All modules are moving away from zabbix-api and now use the httpapi ansible.netcommon plugin. zabbix-api remains supported for backwards compatibility until the next major release; see our README.md for more information about how to migrate.
+- The zabbix_agent and zabbix_proxy roles are moving away from zabbix-api and now use the httpapi ansible.netcommon plugin. zabbix-api remains supported for backwards compatibility until the next major release; see our README.md for more information about how to migrate.
+
+containers.podman
+~~~~~~~~~~~~~~~~~
+
+- New become plugin - podman_unshare
+- Podman generate systemd module
+
+fortinet.fortios
+~~~~~~~~~~~~~~~~
+
+- Support FortiOS v7.0.6, v7.0.7, v7.0.8, v7.2.1, v7.2.2.
+
+Deprecated Features
+-------------------
+
+community.general
+~~~~~~~~~~~~~~~~~
+
+- The ``sap`` modules ``sapcar_extract``, ``sap_task_list_execute``, and ``hana_query`` will be removed from this collection in community.general 7.0.0 and replaced with redirects to ``community.sap_libs``. If you want to continue using these modules, make sure to also install ``community.sap_libs`` (it is part of the Ansible package) (https://github.com/ansible-collections/community.general/pull/5614).
+
Porting Guide for v7.0.0
========================
diff --git a/docs/docsite/rst/reference_appendices/release_and_maintenance.rst b/docs/docsite/rst/reference_appendices/release_and_maintenance.rst
index a5039fc4..cf523778 100644
--- a/docs/docsite/rst/reference_appendices/release_and_maintenance.rst
+++ b/docs/docsite/rst/reference_appendices/release_and_maintenance.rst
@@ -115,52 +115,181 @@ You can refer to the :ref:`core_porting_guides` for tips on updating your playbo
You can install ``ansible-core`` with ``pip``. See :ref:`intro_installation_guide` for details.
.. _release_schedule:
+.. _support_life:
+
+``ansible-core`` support matrix
+-------------------------------
+
+This table links to the changelogs for each major ``ansible-core`` release. These changelogs contain the dates and significant changes in each minor release.
+Dates listed indicate the start date of the maintenance cycle.
+
+.. list-table::
+ :header-rows: 1
+
+ * - Version
+ - Support
+ - End Of Life
+ - Controller Python
+ - Target Python / PowerShell
+ * - `2.15`_
+ - | GA: 22 May 2023
+ | Critical: 06 Nov 2023
+ | Security: 20 May 2024
+ - Nov 2024
+ - | Python 3.9 - 3.11
+ - | Python 2.7
+ | Python 3.5 - 3.11
+ | PowerShell 3 - 5.1
+ * - `2.14`_
+ - | GA: 07 Nov 2022
+ | Critical: 22 May 2023
+ | Security: 06 Nov 2023
+ - 20 May 2024
+ - | Python 3.9 - 3.11
+ - | Python 2.7
+ | Python 3.5 - 3.11
+ | PowerShell 3 - 5.1
+ * - `2.13`_
+ - | GA: 23 May 2022
+ | Critical: 07 Nov 2022
+ | Security: 22 May 2023
+ - 06 Nov 2023
+ - | Python 3.8 - 3.10
+ - | Python 2.7
+ | Python 3.5 - 3.10
+ | PowerShell 3 - 5.1
+ * - `2.12`_
+ - | GA: 08 Nov 2021
+ | Critical: 23 May 2022
+ | Security: 07 Nov 2022
+ - 22 May 2023
+ - | Python 3.8 - 3.10
+ - | Python 2.6
+ | Python 3.5 - 3.10
+ | PowerShell 3 - 5.1
+ * - `2.11`_
+ - | GA: 26 Apr 2021
+ | Critical: 08 Nov 2021
+ | Security: 23 May 2022
+ - | **EOL**
+ | 07 Nov 2022
+ - | Python 2.7
+ | Python 3.5 - 3.9
+ - | Python 2.6 - 2.7
+ | Python 3.5 - 3.9
+ | PowerShell 3 - 5.1
+ * - `2.10`_
+ - | GA: 13 Aug 2020
+ | Critical: 26 Apr 2021
+ | Security: 08 Nov 2021
+ - | **EOL**
+ | 23 May 2022
+ - | Python 2.7
+ | Python 3.5 - 3.9
+ - | Python 2.6 - 2.7
+ | Python 3.5 - 3.9
+ | PowerShell 3 - 5.1
+ * - `2.9`_
+ - | GA: 31 Oct 2019
+ | Critical: 13 Aug 2020
+ | Security: 26 Apr 2021
+ - | **EOL**
+ | 23 May 2022
+ - | Python 2.7
+ | Python 3.5 - 3.8
+ - | Python 2.6 - 2.7
+ | Python 3.5 - 3.8
+ | PowerShell 3 - 5.1
+.. * - 2.16
+.. - 06 Nov 2023
+.. - 20 May 2024
+.. - Nov 2024
+.. - May 2025
+.. - | Python 3.10 - 3.12
+.. - | Python 3.6 - 3.12
+.. | PowerShell TBD
+.. * - 2.17
+.. - 20 May 2024
+.. - Nov 2024
+.. - May 2025
+.. - Nov 2025
+.. - | Python 3.10 - 3.12
+.. - | Python 3.6 - 3.12
+.. | PowerShell TBD
+.. * - 2.18
+.. - Nov 2024
+.. - May 2025
+.. - Nov 2025
+.. - May 2026
+.. - | Python 3.11 - 3.13
+.. - | Python 3.6 - 3.13
+.. | PowerShell TBD
+.. * - 2.19
+.. - May 2025
+.. - Nov 2025
+.. - May 2026
+.. - Nov 2026
+.. - | Python 3.11 - 3.13
+.. - | Python 3.6 - 3.13
+.. | PowerShell TBD
+.. * - 2.20
+.. - Nov 2025
+.. - May 2026
+.. - Nov 2026
+.. - May 2027
+.. - | Python 3.12 - 3.14
+.. - | Python 3.8 - 3.14
+.. | PowerShell TBD
+.. * - 2.21
+.. - May 2026
+.. - Nov 2026
+.. - May 2027
+.. - Nov 2027
+.. - | Python 3.12 - 3.14
+.. - | Python 3.8 - 3.14
+.. | PowerShell TBD
+.. * - 2.22
+.. - Nov 2026
+.. - May 2027
+.. - Nov 2027
+.. - May 2028
+.. - | Python 3.13 - 3.15
+.. - | Python 3.8 - 3.15
+.. | PowerShell TBD
+.. * - 2.23
+.. - May 2027
+.. - Nov 2027
+.. - May 2028
+.. - Nov 2028
+.. - | Python 3.13 - 3.15
+.. - | Python 3.8 - 3.15
+.. | PowerShell TBD
+.. * - 2.24
+.. - Nov 2027
+.. - May 2028
+.. - Nov 2028
+.. - May 2029
+.. - | Python 3.14 - 3.16
+.. - | Python 3.8 - 3.16
+.. | PowerShell TBD
+.. * - 2.25
+.. - May 2028
+.. - Nov 2028
+.. - May 2029
+.. - Nov 2029
+.. - | Python 3.14 - 3.16
+.. - | Python 3.8 - 3.16
+.. | PowerShell TBD
-``ansible-core`` changelogs
-----------------------------
-This table links to the changelogs for each major ``ansible-core``
-release. These changelogs contain the dates and
-significant changes in each minor release.
-
-================================= ==================================================== ======================
-``ansible-core``/``ansible-base`` Status Expected end of life
- Release
-================================= ==================================================== ======================
-devel In development (ansible-core 2.15 unreleased, trunk) TBD
-`2.14 ansible-core Changelogs`_ Maintained (security **and** general bug fixes) May 2024
-`2.13 ansible-core Changelogs`_ Maintained (security **and** critical bug fixes) Nov 2023
-`2.12 ansible-core Changelogs`_ Maintained (security bug fixes only) May 2023
-`2.11 ansible-core Changelogs`_ Unmaintained (end of life) EOL
-`2.10 ansible-base Changelogs`_ Unmaintained (end of life) EOL
-`2.9 Changelogs`_ Unmaintained (end of life) EOL
-`2.8 Changelogs`_ Unmaintained (end of life) EOL
-`2.7 Changelogs`_ Unmaintained (end of life) EOL
-`2.6 Changelogs`_ Unmaintained (end of life) EOL
-`2.5 Changelogs`_ Unmaintained (end of life) EOL
-<2.5 Unmaintained (end of life) EOL
-================================= ==================================================== ======================
-
-.. _2.14 ansible-core Changelogs:
-.. _2.14: https://github.com/ansible/ansible/blob/stable-2.14/changelogs/CHANGELOG-v2.14.rst
-.. _2.13 ansible-core Changelogs:
-.. _2.13: https://github.com/ansible/ansible/blob/stable-2.13/changelogs/CHANGELOG-v2.13.rst
-.. _2.12 ansible-core Changelogs:
-.. _2.12: https://github.com/ansible/ansible/blob/stable-2.12/changelogs/CHANGELOG-v2.12.rst
-.. _2.11 ansible-core Changelogs:
-.. _2.11: https://github.com/ansible/ansible/blob/stable-2.11/changelogs/CHANGELOG-v2.11.rst
-.. _2.10 ansible-base Changelogs:
-.. _2.10: https://github.com/ansible/ansible/blob/stable-2.10/changelogs/CHANGELOG-v2.10.rst
-.. _2.9 Changelogs:
.. _2.9: https://github.com/ansible/ansible/blob/stable-2.9/changelogs/CHANGELOG-v2.9.rst
-.. _2.8 Changelogs:
-.. _2.8: https://github.com/ansible/ansible/blob/stable-2.8/changelogs/CHANGELOG-v2.8.rst
-.. _2.7 Changelogs: https://github.com/ansible/ansible/blob/stable-2.7/changelogs/CHANGELOG-v2.7.rst
-.. _2.6 Changelogs:
-.. _2.6: https://github.com/ansible/ansible/blob/stable-2.6/changelogs/CHANGELOG-v2.6.rst
-.. _2.5 Changelogs: https://github.com/ansible/ansible/blob/stable-2.5/changelogs/CHANGELOG-v2.5.rst
-.. _support_life:
-.. _methods:
+.. _2.10: https://github.com/ansible/ansible/blob/stable-2.10/changelogs/CHANGELOG-v2.10.rst
+.. _2.11: https://github.com/ansible/ansible/blob/stable-2.11/changelogs/CHANGELOG-v2.11.rst
+.. _2.12: https://github.com/ansible/ansible/blob/stable-2.12/changelogs/CHANGELOG-v2.12.rst
+.. _2.13: https://github.com/ansible/ansible/blob/stable-2.13/changelogs/CHANGELOG-v2.13.rst
+.. _2.14: https://github.com/ansible/ansible/blob/stable-2.14/changelogs/CHANGELOG-v2.14.rst
+.. _2.15: https://github.com/ansible/ansible/blob/stable-2.15/changelogs/CHANGELOG-v2.15.rst
+
Preparing for a new release
===========================
diff --git a/docs/docsite/rst/scenario_guides/vmware_rest_scenarios/authentication.rst b/docs/docsite/rst/scenario_guides/vmware_rest_scenarios/authentication.rst
index 79c55102..4f09cbe1 100644
--- a/docs/docsite/rst/scenario_guides/vmware_rest_scenarios/authentication.rst
+++ b/docs/docsite/rst/scenario_guides/vmware_rest_scenarios/authentication.rst
@@ -33,7 +33,7 @@ Module parameters
All the vcenter_rest modules accept the following arguments:
-- ``vcenter_host``
+- ``vcenter_hostname``
- ``vcenter_username``
- ``vcenter_password``
diff --git a/docs/docsite/sphinx_conf/all_conf.py b/docs/docsite/sphinx_conf/all_conf.py
index 46eb8baa..58db693a 100644
--- a/docs/docsite/sphinx_conf/all_conf.py
+++ b/docs/docsite/sphinx_conf/all_conf.py
@@ -283,20 +283,13 @@ autoclass_content = 'both'
# name: ('http://UPSTREAM_URL', (None, 'path/to/local/cache.inv'))
#
# The update script depends on this format so deviating from this (for instance, adding a third
-# location for the mappning to live) will confuse it.
+# location for the mapping to live) will confuse it.
intersphinx_mapping = {'python': ('https://docs.python.org/2/', (None, '../python2.inv')),
'python3': ('https://docs.python.org/3/', (None, '../python3.inv')),
'jinja2': ('http://jinja.palletsprojects.com/', (None, '../jinja2.inv')),
+ 'ansible_7': ('https://docs.ansible.com/ansible/7/', (None, '../ansible_7.inv')),
'ansible_6': ('https://docs.ansible.com/ansible/6/', (None, '../ansible_6.inv')),
- 'ansible_5': ('https://docs.ansible.com/ansible/5/', (None, '../ansible_5.inv')),
- 'ansible_4': ('https://docs.ansible.com/ansible/4/', (None, '../ansible_4.inv')),
- 'ansible_3': ('https://docs.ansible.com/ansible/3/', (None, '../ansible_3.inv')),
- 'ansible_2_10': ('https://docs.ansible.com/ansible/2.10/', (None, '../ansible_2_10.inv')),
'ansible_2_9': ('https://docs.ansible.com/ansible/2.9/', (None, '../ansible_2_9.inv')),
- 'ansible_2_8': ('https://docs.ansible.com/ansible/2.8/', (None, '../ansible_2_8.inv')),
- 'ansible_2_7': ('https://docs.ansible.com/ansible/2.7/', (None, '../ansible_2_7.inv')),
- 'ansible_2_6': ('https://docs.ansible.com/ansible/2.6/', (None, '../ansible_2_6.inv')),
- 'ansible_2_5': ('https://docs.ansible.com/ansible/2.5/', (None, '../ansible_2_5.inv')),
}
# linkchecker settings
diff --git a/docs/docsite/sphinx_conf/ansible_conf.py b/docs/docsite/sphinx_conf/ansible_conf.py
index 1ccf6966..1fa8cab8 100644
--- a/docs/docsite/sphinx_conf/ansible_conf.py
+++ b/docs/docsite/sphinx_conf/ansible_conf.py
@@ -297,16 +297,9 @@ autoclass_content = 'both'
intersphinx_mapping = {'python': ('https://docs.python.org/2/', (None, '../python2.inv')),
'python3': ('https://docs.python.org/3/', (None, '../python3.inv')),
'jinja2': ('http://jinja.palletsprojects.com/', (None, '../jinja2.inv')),
+ 'ansible_7': ('https://docs.ansible.com/ansible/7/', (None, '../ansible_7.inv')),
'ansible_6': ('https://docs.ansible.com/ansible/6/', (None, '../ansible_6.inv')),
- 'ansible_5': ('https://docs.ansible.com/ansible/5/', (None, '../ansible_5.inv')),
- 'ansible_4': ('https://docs.ansible.com/ansible/4/', (None, '../ansible_4.inv')),
- 'ansible_3': ('https://docs.ansible.com/ansible/3/', (None, '../ansible_3.inv')),
- 'ansible_2_10': ('https://docs.ansible.com/ansible/2.10/', (None, '../ansible_2_10.inv')),
'ansible_2_9': ('https://docs.ansible.com/ansible/2.9/', (None, '../ansible_2_9.inv')),
- 'ansible_2_8': ('https://docs.ansible.com/ansible/2.8/', (None, '../ansible_2_8.inv')),
- 'ansible_2_7': ('https://docs.ansible.com/ansible/2.7/', (None, '../ansible_2_7.inv')),
- 'ansible_2_6': ('https://docs.ansible.com/ansible/2.6/', (None, '../ansible_2_6.inv')),
- 'ansible_2_5': ('https://docs.ansible.com/ansible/2.5/', (None, '../ansible_2_5.inv')),
}
# linkchecker settings
diff --git a/docs/docsite/sphinx_conf/core_conf.py b/docs/docsite/sphinx_conf/core_conf.py
index 58237467..7f1663b3 100644
--- a/docs/docsite/sphinx_conf/core_conf.py
+++ b/docs/docsite/sphinx_conf/core_conf.py
@@ -336,16 +336,9 @@ autoclass_content = 'both'
intersphinx_mapping = {'python': ('https://docs.python.org/2/', (None, '../python2.inv')),
'python3': ('https://docs.python.org/3/', (None, '../python3.inv')),
'jinja2': ('http://jinja.palletsprojects.com/', (None, '../jinja2.inv')),
+ 'ansible_7': ('https://docs.ansible.com/ansible/7/', (None, '../ansible_7.inv')),
'ansible_6': ('https://docs.ansible.com/ansible/6/', (None, '../ansible_6.inv')),
- 'ansible_5': ('https://docs.ansible.com/ansible/5/', (None, '../ansible_5.inv')),
- 'ansible_4': ('https://docs.ansible.com/ansible/4/', (None, '../ansible_4.inv')),
- 'ansible_3': ('https://docs.ansible.com/ansible/3/', (None, '../ansible_3.inv')),
- 'ansible_2_10': ('https://docs.ansible.com/ansible/2.10/', (None, '../ansible_2_10.inv')),
'ansible_2_9': ('https://docs.ansible.com/ansible/2.9/', (None, '../ansible_2_9.inv')),
- 'ansible_2_8': ('https://docs.ansible.com/ansible/2.8/', (None, '../ansible_2_8.inv')),
- 'ansible_2_7': ('https://docs.ansible.com/ansible/2.7/', (None, '../ansible_2_7.inv')),
- 'ansible_2_6': ('https://docs.ansible.com/ansible/2.6/', (None, '../ansible_2_6.inv')),
- 'ansible_2_5': ('https://docs.ansible.com/ansible/2.5/', (None, '../ansible_2_5.inv')),
}
# linkchecker settings
diff --git a/docs/docsite/sphinx_conf/core_lang_conf.py b/docs/docsite/sphinx_conf/core_lang_conf.py
index 9b84c87a..49efc6ee 100644
--- a/docs/docsite/sphinx_conf/core_lang_conf.py
+++ b/docs/docsite/sphinx_conf/core_lang_conf.py
@@ -336,16 +336,9 @@ autoclass_content = 'both'
intersphinx_mapping = {'python': ('https://docs.python.org/2/', (None, '../python2.inv')),
'python3': ('https://docs.python.org/3/', (None, '../python3.inv')),
'jinja2': ('http://jinja.palletsprojects.com/', (None, '../jinja2.inv')),
+ 'ansible_7': ('https://docs.ansible.com/ansible/7/', (None, '../ansible_7.inv')),
'ansible_6': ('https://docs.ansible.com/ansible/6/', (None, '../ansible_6.inv')),
- 'ansible_5': ('https://docs.ansible.com/ansible/5/', (None, '../ansible_5.inv')),
- 'ansible_4': ('https://docs.ansible.com/ansible/4/', (None, '../ansible_4.inv')),
- 'ansible_3': ('https://docs.ansible.com/ansible/3/', (None, '../ansible_3.inv')),
- 'ansible_2_10': ('https://docs.ansible.com/ansible/2.10/', (None, '../ansible_2_10.inv')),
'ansible_2_9': ('https://docs.ansible.com/ansible/2.9/', (None, '../ansible_2_9.inv')),
- 'ansible_2_8': ('https://docs.ansible.com/ansible/2.8/', (None, '../ansible_2_8.inv')),
- 'ansible_2_7': ('https://docs.ansible.com/ansible/2.7/', (None, '../ansible_2_7.inv')),
- 'ansible_2_6': ('https://docs.ansible.com/ansible/2.6/', (None, '../ansible_2_6.inv')),
- 'ansible_2_5': ('https://docs.ansible.com/ansible/2.5/', (None, '../ansible_2_5.inv')),
}
# linkchecker settings
diff --git a/docs/man/man1/ansible-config.1 b/docs/man/man1/ansible-config.1
index daf4a350..11fa7f1f 100644
--- a/docs/man/man1/ansible-config.1
+++ b/docs/man/man1/ansible-config.1
@@ -27,7 +27,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
..
-.TH "ANSIBLE-CONFIG" 1 "" "Ansible 2.14.1" "System administration commands"
+.TH "ANSIBLE-CONFIG" 1 "" "Ansible 2.14.2" "System administration commands"
.SH NAME
ansible-config \- View ansible configuration.
.SH SYNOPSIS
diff --git a/docs/man/man1/ansible-console.1 b/docs/man/man1/ansible-console.1
index b7dd9969..432d7773 100644
--- a/docs/man/man1/ansible-console.1
+++ b/docs/man/man1/ansible-console.1
@@ -27,7 +27,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
..
-.TH "ANSIBLE-CONSOLE" 1 "" "Ansible 2.14.1" "System administration commands"
+.TH "ANSIBLE-CONSOLE" 1 "" "Ansible 2.14.2" "System administration commands"
.SH NAME
ansible-console \- REPL console for executing Ansible tasks.
.SH SYNOPSIS
@@ -238,7 +238,7 @@ ask for privilege escalation password
\fB\-M\fP, \fB\-\-module\-path\fP
.INDENT 0.0
.INDENT 3.5
-prepend colon\-separated path(s) to module library (default={{ ANSIBLE_HOME ~ "/plugins/modules:/usr/share/ansible/plugins/modules" }})
+prepend colon\-separated path(s) to module library (default={{ ANSIBLE_HOME ~ \(dq/plugins/modules:/usr/share/ansible/plugins/modules\(dq }})
.UNINDENT
.UNINDENT
.sp
diff --git a/docs/man/man1/ansible-doc.1 b/docs/man/man1/ansible-doc.1
index 9dae87a3..fe61cd4b 100644
--- a/docs/man/man1/ansible-doc.1
+++ b/docs/man/man1/ansible-doc.1
@@ -27,7 +27,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
..
-.TH "ANSIBLE-DOC" 1 "" "Ansible 2.14.1" "System administration commands"
+.TH "ANSIBLE-DOC" 1 "" "Ansible 2.14.2" "System administration commands"
.SH NAME
ansible-doc \- plugin documentation tool
.SH SYNOPSIS
@@ -46,7 +46,7 @@ ansible-doc \- plugin documentation tool
displays information on modules installed in Ansible libraries.
It displays a terse listing of plugins and their short descriptions,
provides a printout of their DOCUMENTATION strings,
-and it can create a short "snippet" which can be pasted into a playbook.
+and it can create a short \(dqsnippet\(dq which can be pasted into a playbook.
.SH COMMON OPTIONS
.INDENT 0.0
.INDENT 3.5
@@ -92,7 +92,7 @@ Show plugin names and their source files without summaries (implies \-\-list). A
\fB\-M\fP, \fB\-\-module\-path\fP
.INDENT 0.0
.INDENT 3.5
-prepend colon\-separated path(s) to module library (default={{ ANSIBLE_HOME ~ "/plugins/modules:/usr/share/ansible/plugins/modules" }})
+prepend colon\-separated path(s) to module library (default={{ ANSIBLE_HOME ~ \(dq/plugins/modules:/usr/share/ansible/plugins/modules\(dq }})
.UNINDENT
.UNINDENT
.sp
@@ -141,7 +141,7 @@ Show playbook snippet for these plugin types: inventory, lookup, module
\fB\-t\fP \(aqTYPE\(aq, \fB\-\-type\fP \(aqTYPE\(aq
.INDENT 0.0
.INDENT 3.5
-Choose which plugin type (defaults to "module"). Available plugin types are : (\(aqbecome\(aq, \(aqcache\(aq, \(aqcallback\(aq, \(aqcliconf\(aq, \(aqconnection\(aq, \(aqhttpapi\(aq, \(aqinventory\(aq, \(aqlookup\(aq, \(aqnetconf\(aq, \(aqshell\(aq, \(aqvars\(aq, \(aqmodule\(aq, \(aqstrategy\(aq, \(aqtest\(aq, \(aqfilter\(aq, \(aqrole\(aq, \(aqkeyword\(aq)
+Choose which plugin type (defaults to \(dqmodule\(dq). Available plugin types are : (\(aqbecome\(aq, \(aqcache\(aq, \(aqcallback\(aq, \(aqcliconf\(aq, \(aqconnection\(aq, \(aqhttpapi\(aq, \(aqinventory\(aq, \(aqlookup\(aq, \(aqnetconf\(aq, \(aqshell\(aq, \(aqvars\(aq, \(aqmodule\(aq, \(aqstrategy\(aq, \(aqtest\(aq, \(aqfilter\(aq, \(aqrole\(aq, \(aqkeyword\(aq)
.UNINDENT
.UNINDENT
.sp
diff --git a/docs/man/man1/ansible-galaxy.1 b/docs/man/man1/ansible-galaxy.1
index 32c13c98..98be5676 100644
--- a/docs/man/man1/ansible-galaxy.1
+++ b/docs/man/man1/ansible-galaxy.1
@@ -27,7 +27,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
..
-.TH "ANSIBLE-GALAXY" 1 "" "Ansible 2.14.1" "System administration commands"
+.TH "ANSIBLE-GALAXY" 1 "" "Ansible 2.14.2" "System administration commands"
.SH NAME
ansible-galaxy \- Perform various Role and Collection related operations.
.SH SYNOPSIS
@@ -35,8 +35,11 @@ ansible-galaxy \- Perform various Role and Collection related operations.
usage: ansible\-galaxy [\-h] [\-\-version] [\-v] TYPE ...
.SH DESCRIPTION
.sp
-command to manage Ansible roles in shared repositories, the default of which is
-Ansible Galaxy \fIhttps://galaxy.ansible.com\fP\&.
+Command to manage Ansible roles and collections.
+.sp
+None of the CLI tools are designed to run concurrently with themselves.
+Use an external scheduler and/or locking to ensure there are no clashing
+operations.
.SH COMMON OPTIONS
.sp
\fB\-\-version\fP
diff --git a/docs/man/man1/ansible-inventory.1 b/docs/man/man1/ansible-inventory.1
index dc09adb0..8b17cbba 100644
--- a/docs/man/man1/ansible-inventory.1
+++ b/docs/man/man1/ansible-inventory.1
@@ -27,7 +27,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
..
-.TH "ANSIBLE-INVENTORY" 1 "" "Ansible 2.14.1" "System administration commands"
+.TH "ANSIBLE-INVENTORY" 1 "" "Ansible 2.14.2" "System administration commands"
.SH NAME
ansible-inventory \- None
.SH SYNOPSIS
diff --git a/docs/man/man1/ansible-playbook.1 b/docs/man/man1/ansible-playbook.1
index 872a62d7..88ab61fd 100644
--- a/docs/man/man1/ansible-playbook.1
+++ b/docs/man/man1/ansible-playbook.1
@@ -27,7 +27,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
..
-.TH "ANSIBLE-PLAYBOOK" 1 "" "Ansible 2.14.1" "System administration commands"
+.TH "ANSIBLE-PLAYBOOK" 1 "" "Ansible 2.14.2" "System administration commands"
.SH NAME
ansible-playbook \- Runs Ansible playbooks, executing the defined tasks on the targeted hosts.
.SH SYNOPSIS
@@ -242,7 +242,7 @@ ask for privilege escalation password
\fB\-M\fP, \fB\-\-module\-path\fP
.INDENT 0.0
.INDENT 3.5
-prepend colon\-separated path(s) to module library (default={{ ANSIBLE_HOME ~ "/plugins/modules:/usr/share/ansible/plugins/modules" }})
+prepend colon\-separated path(s) to module library (default={{ ANSIBLE_HOME ~ \(dq/plugins/modules:/usr/share/ansible/plugins/modules\(dq }})
.UNINDENT
.UNINDENT
.sp
diff --git a/docs/man/man1/ansible-pull.1 b/docs/man/man1/ansible-pull.1
index 02947567..46baa468 100644
--- a/docs/man/man1/ansible-pull.1
+++ b/docs/man/man1/ansible-pull.1
@@ -27,7 +27,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
..
-.TH "ANSIBLE-PULL" 1 "" "Ansible 2.14.1" "System administration commands"
+.TH "ANSIBLE-PULL" 1 "" "Ansible 2.14.2" "System administration commands"
.SH NAME
ansible-pull \- pulls playbooks from a VCS repo and executes them for the local host
.SH SYNOPSIS
@@ -239,7 +239,7 @@ ask for privilege escalation password
\fB\-M\fP, \fB\-\-module\-path\fP
.INDENT 0.0
.INDENT 3.5
-prepend colon\-separated path(s) to module library (default={{ ANSIBLE_HOME ~ "/plugins/modules:/usr/share/ansible/plugins/modules" }})
+prepend colon\-separated path(s) to module library (default={{ ANSIBLE_HOME ~ \(dq/plugins/modules:/usr/share/ansible/plugins/modules\(dq }})
.UNINDENT
.UNINDENT
.sp
diff --git a/docs/man/man1/ansible-vault.1 b/docs/man/man1/ansible-vault.1
index 27ec26c4..37019579 100644
--- a/docs/man/man1/ansible-vault.1
+++ b/docs/man/man1/ansible-vault.1
@@ -27,7 +27,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
..
-.TH "ANSIBLE-VAULT" 1 "" "Ansible 2.14.1" "System administration commands"
+.TH "ANSIBLE-VAULT" 1 "" "Ansible 2.14.2" "System administration commands"
.SH NAME
ansible-vault \- encryption/decryption utility for Ansible data files
.SH SYNOPSIS
diff --git a/docs/man/man1/ansible.1 b/docs/man/man1/ansible.1
index add01897..5891ece1 100644
--- a/docs/man/man1/ansible.1
+++ b/docs/man/man1/ansible.1
@@ -27,7 +27,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
..
-.TH "ANSIBLE" 1 "" "Ansible 2.14.1" "System administration commands"
+.TH "ANSIBLE" 1 "" "Ansible 2.14.2" "System administration commands"
.SH NAME
ansible \- Define and run a single task 'playbook' against a set of hosts
.SH SYNOPSIS
@@ -212,7 +212,7 @@ ask for privilege escalation password
\fB\-M\fP, \fB\-\-module\-path\fP
.INDENT 0.0
.INDENT 3.5
-prepend colon\-separated path(s) to module library (default={{ ANSIBLE_HOME ~ "/plugins/modules:/usr/share/ansible/plugins/modules" }})
+prepend colon\-separated path(s) to module library (default={{ ANSIBLE_HOME ~ \(dq/plugins/modules:/usr/share/ansible/plugins/modules\(dq }})
.UNINDENT
.UNINDENT
.sp
@@ -233,7 +233,7 @@ override the connection timeout in seconds (default=10)
\fB\-a\fP \(aqMODULE_ARGS\(aq, \fB\-\-args\fP \(aqMODULE_ARGS\(aq
.INDENT 0.0
.INDENT 3.5
-The action\(aqs options in space separated k=v format: \-a \(aqopt1=val1 opt2=val2\(aq or a json string: \-a \(aq{"opt1": "val1", "opt2": "val2"}\(aq
+The action\(aqs options in space separated k=v format: \-a \(aqopt1=val1 opt2=val2\(aq or a json string: \-a \(aq{\(dqopt1\(dq: \(dqval1\(dq, \(dqopt2\(dq: \(dqval2\(dq}\(aq
.UNINDENT
.UNINDENT
.sp
diff --git a/lib/ansible/cli/galaxy.py b/lib/ansible/cli/galaxy.py
index f4148d92..3cb7fe2c 100755
--- a/lib/ansible/cli/galaxy.py
+++ b/lib/ansible/cli/galaxy.py
@@ -79,15 +79,11 @@ SERVER_DEF = [
# config definition fields
SERVER_ADDITIONAL = {
'v3': {'default': 'False'},
- 'validate_certs': {'default': True, 'cli': [{'name': 'validate_certs'}]},
+ 'validate_certs': {'cli': [{'name': 'validate_certs'}]},
'timeout': {'default': '60', 'cli': [{'name': 'timeout'}]},
'token': {'default': None},
}
-# override default if the generic is set
-if C.GALAXY_IGNORE_CERTS is not None:
- SERVER_ADDITIONAL['validate_certs'].update({'default': not C.GALAXY_IGNORE_CERTS})
-
def with_collection_artifacts_manager(wrapped_method):
"""Inject an artifacts manager if not passed explicitly.
@@ -100,7 +96,8 @@ def with_collection_artifacts_manager(wrapped_method):
if 'artifacts_manager' in kwargs:
return wrapped_method(*args, **kwargs)
- artifacts_manager_kwargs = {'validate_certs': context.CLIARGS['validate_certs']}
+ # FIXME: use validate_certs context from Galaxy servers when downloading collections
+ artifacts_manager_kwargs = {'validate_certs': context.CLIARGS['resolved_validate_certs']}
keyring = context.CLIARGS.get('keyring', None)
if keyring is not None:
@@ -197,7 +194,11 @@ class RoleDistributionServer:
class GalaxyCLI(CLI):
- '''command to manage Ansible roles in shared repositories, the default of which is Ansible Galaxy *https://galaxy.ansible.com*.'''
+ '''Command to manage Ansible roles and collections.
+
+ None of the CLI tools are designed to run concurrently with themselves.
+ Use an external scheduler and/or locking to ensure there are no clashing operations.
+ '''
name = 'ansible-galaxy'
@@ -584,6 +585,8 @@ class GalaxyCLI(CLI):
# ensure we have 'usable' cli option
setattr(options, 'validate_certs', (None if options.ignore_certs is None else not options.ignore_certs))
+ # the default if validate_certs is None
+ setattr(options, 'resolved_validate_certs', (options.validate_certs if options.validate_certs is not None else not C.GALAXY_IGNORE_CERTS))
display.verbosity = options.verbosity
return options
@@ -641,6 +644,8 @@ class GalaxyCLI(CLI):
token_val = server_options['token'] or NoTokenSentinel
username = server_options['username']
v3 = server_options.pop('v3')
+ if server_options['validate_certs'] is None:
+ server_options['validate_certs'] = context.CLIARGS['resolved_validate_certs']
validate_certs = server_options['validate_certs']
if v3:
@@ -676,9 +681,7 @@ class GalaxyCLI(CLI):
cmd_server = context.CLIARGS['api_server']
cmd_token = GalaxyToken(token=context.CLIARGS['api_key'])
- # resolve validate_certs
- v_config_default = True if C.GALAXY_IGNORE_CERTS is None else not C.GALAXY_IGNORE_CERTS
- validate_certs = v_config_default if context.CLIARGS['validate_certs'] is None else context.CLIARGS['validate_certs']
+ validate_certs = context.CLIARGS['resolved_validate_certs']
if cmd_server:
# Cmd args take precedence over the config entry but first check if the arg was a name and use that config
# entry, otherwise create a new API entry for the server specified.
@@ -844,7 +847,7 @@ class GalaxyCLI(CLI):
name=coll_req['name'],
),
coll_req['source'],
- validate_certs=not context.CLIARGS['ignore_certs'],
+ validate_certs=context.CLIARGS['resolved_validate_certs'],
),
)
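
These galaxy.py hunks centralize TLS verification in a single ``resolved_validate_certs`` value instead of baking the config fallback into the server defaults. A minimal sketch of the precedence this establishes (the standalone function below is illustrative, not part of the real CLI)::

    def resolved_validate_certs(cli_ignore_certs, galaxy_ignore_certs):
        # an explicit --ignore-certs flag wins; otherwise fall back to the
        # GALAXY_IGNORE_CERTS configuration setting
        if cli_ignore_certs is not None:
            return not cli_ignore_certs
        return not galaxy_ignore_certs

    print(resolved_validate_certs(None, False))  # True - verify TLS by default
    print(resolved_validate_certs(True, False))  # False - the CLI flag overrides config
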
diff --git a/lib/ansible/cli/inventory.py b/lib/ansible/cli/inventory.py
index 0e860801..e8ed75e4 100755
--- a/lib/ansible/cli/inventory.py
+++ b/lib/ansible/cli/inventory.py
@@ -13,7 +13,6 @@ from ansible.cli import CLI
import sys
import argparse
-from operator import attrgetter
from ansible import constants as C
from ansible import context
@@ -273,11 +272,11 @@ class InventoryCLI(CLI):
result = [self._graph_name('@%s:' % group.name, depth)]
depth = depth + 1
- for kid in sorted(group.child_groups, key=attrgetter('name')):
+ for kid in group.child_groups:
result.extend(self._graph_group(kid, depth))
if group.name != 'all':
- for host in sorted(group.hosts, key=attrgetter('name')):
+ for host in group.hosts:
result.append(self._graph_name(host.name, depth))
if context.CLIARGS['show_vars']:
result.extend(self._show_vars(self._get_host_variables(host), depth + 1))
@@ -303,9 +302,9 @@ class InventoryCLI(CLI):
results = {}
results[group.name] = {}
if group.name != 'all':
- results[group.name]['hosts'] = [h.name for h in sorted(group.hosts, key=attrgetter('name'))]
+ results[group.name]['hosts'] = [h.name for h in group.hosts]
results[group.name]['children'] = []
- for subgroup in sorted(group.child_groups, key=attrgetter('name')):
+ for subgroup in group.child_groups:
results[group.name]['children'].append(subgroup.name)
if subgroup.name not in seen:
results.update(format_group(subgroup))
@@ -343,14 +342,14 @@ class InventoryCLI(CLI):
# subgroups
results[group.name]['children'] = {}
- for subgroup in sorted(group.child_groups, key=attrgetter('name')):
+ for subgroup in group.child_groups:
if subgroup.name != 'all':
results[group.name]['children'].update(format_group(subgroup))
# hosts for group
results[group.name]['hosts'] = {}
if group.name != 'all':
- for h in sorted(group.hosts, key=attrgetter('name')):
+ for h in group.hosts:
myvars = {}
if h.name not in seen: # avoid defining host vars more than once
seen.append(h.name)
@@ -377,7 +376,7 @@ class InventoryCLI(CLI):
results[group.name] = {}
results[group.name]['children'] = []
- for subgroup in sorted(group.child_groups, key=attrgetter('name')):
+ for subgroup in group.child_groups:
if subgroup.name == 'ungrouped' and not has_ungrouped:
continue
if group.name != 'all':
@@ -385,7 +384,7 @@ class InventoryCLI(CLI):
results.update(format_group(subgroup))
if group.name != 'all':
- for host in sorted(group.hosts, key=attrgetter('name')):
+ for host in group.hosts:
if host.name not in seen:
seen.add(host.name)
host_vars = self._get_host_variables(host=host)
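
Dropping the ``sorted(...)`` calls changes observable behavior: ``ansible-inventory`` now lists groups and hosts in the order they appear in the inventory source rather than alphabetically. A trivial illustration::

    hosts = ['web02', 'db01', 'web01']  # inventory definition order
    print(hosts)                        # new output order: ['web02', 'db01', 'web01']
    print(sorted(hosts))                # old output order: ['db01', 'web01', 'web02']
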
diff --git a/lib/ansible/executor/play_iterator.py b/lib/ansible/executor/play_iterator.py
index 6049b236..24497821 100644
--- a/lib/ansible/executor/play_iterator.py
+++ b/lib/ansible/executor/play_iterator.py
@@ -571,7 +571,7 @@ class PlayIterator:
Given the current HostState state, determines if the current block, or any child blocks,
are in rescue mode.
'''
- if state.get_current_block().rescue:
+ if state.run_state == IteratingStates.TASKS and state.get_current_block().rescue:
return True
if state.tasks_child_state is not None:
return self.is_any_block_rescuing(state.tasks_child_state)
diff --git a/lib/ansible/module_utils/ansible_release.py b/lib/ansible/module_utils/ansible_release.py
index a38538f5..7a2e435b 100644
--- a/lib/ansible/module_utils/ansible_release.py
+++ b/lib/ansible/module_utils/ansible_release.py
@@ -19,6 +19,6 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-__version__ = '2.14.1'
+__version__ = '2.14.2'
__author__ = 'Ansible, Inc.'
__codename__ = "C'mon Everybody"
diff --git a/lib/ansible/module_utils/common/arg_spec.py b/lib/ansible/module_utils/common/arg_spec.py
index 9fa2b4d2..d9f716ef 100644
--- a/lib/ansible/module_utils/common/arg_spec.py
+++ b/lib/ansible/module_utils/common/arg_spec.py
@@ -12,6 +12,7 @@ from ansible.module_utils.common.parameters import (
_get_legal_inputs,
_get_unsupported_parameters,
_handle_aliases,
+ _list_deprecations,
_list_no_log_values,
_set_defaults,
_validate_argument_types,
@@ -31,6 +32,7 @@ from ansible.module_utils.common.validation import (
from ansible.module_utils.errors import (
AliasError,
AnsibleValidationErrorMultiple,
+ DeprecationError,
MutuallyExclusiveError,
NoLogError,
RequiredDefaultError,
@@ -58,6 +60,7 @@ class ValidationResult:
"""
self._unsupported_parameters = set()
+ self._supported_parameters = dict()
self._validated_parameters = deepcopy(parameters)
self._deprecations = []
self._warnings = []
@@ -192,7 +195,7 @@ class ArgumentSpecValidator:
for deprecation in alias_deprecations:
result._deprecations.append({
- 'name': deprecation['name'],
+ 'msg': "Alias '%s' is deprecated. See the module docs for more information" % deprecation['name'],
'version': deprecation.get('version'),
'date': deprecation.get('date'),
'collection_name': deprecation.get('collection_name'),
@@ -204,7 +207,19 @@ class ArgumentSpecValidator:
result.errors.append(NoLogError(to_native(te)))
try:
- result._unsupported_parameters.update(_get_unsupported_parameters(self.argument_spec, result._validated_parameters, legal_inputs))
+ result._deprecations.extend(_list_deprecations(self.argument_spec, result._validated_parameters))
+ except TypeError as te:
+ result.errors.append(DeprecationError(to_native(te)))
+
+ try:
+ result._unsupported_parameters.update(
+ _get_unsupported_parameters(
+ self.argument_spec,
+ result._validated_parameters,
+ legal_inputs,
+ store_supported=result._supported_parameters,
+ )
+ )
except TypeError as te:
result.errors.append(RequiredDefaultError(to_native(te)))
except ValueError as ve:
@@ -233,10 +248,20 @@ class ArgumentSpecValidator:
result._no_log_values.update(_set_defaults(self.argument_spec, result._validated_parameters))
+ alias_deprecations = []
_validate_sub_spec(self.argument_spec, result._validated_parameters,
errors=result.errors,
no_log_values=result._no_log_values,
- unsupported_parameters=result._unsupported_parameters)
+ unsupported_parameters=result._unsupported_parameters,
+ supported_parameters=result._supported_parameters,
+ alias_deprecations=alias_deprecations,)
+ for deprecation in alias_deprecations:
+ result._deprecations.append({
+ 'msg': "Alias '%s' is deprecated. See the module docs for more information" % deprecation['name'],
+ 'version': deprecation.get('version'),
+ 'date': deprecation.get('date'),
+ 'collection_name': deprecation.get('collection_name'),
+ })
if result._unsupported_parameters:
flattened_names = []
@@ -247,9 +272,17 @@ class ArgumentSpecValidator:
flattened_names.append(item)
unsupported_string = ", ".join(sorted(list(flattened_names)))
- supported_string = ", ".join(self._valid_parameter_names)
- result.errors.append(
- UnsupportedError("{0}. Supported parameters include: {1}.".format(unsupported_string, supported_string)))
+ supported_params = supported_aliases = []
+ if result._supported_parameters.get(item):
+ supported_params = sorted(list(result._supported_parameters[item][0]))
+ supported_aliases = sorted(list(result._supported_parameters[item][1]))
+ supported_string = ", ".join(supported_params)
+ if supported_aliases:
+ aliases_string = ", ".join(supported_aliases)
+ supported_string += " (%s)" % aliases_string
+
+ msg = "{0}. Supported parameters include: {1}.".format(unsupported_string, supported_string)
+ result.errors.append(UnsupportedError(msg))
return result
@@ -268,7 +301,7 @@ class ModuleArgumentSpecValidator(ArgumentSpecValidator):
result = super(ModuleArgumentSpecValidator, self).validate(parameters)
for d in result._deprecations:
- deprecate("Alias '{name}' is deprecated. See the module docs for more information".format(name=d['name']),
+ deprecate(d['msg'],
version=d.get('version'), date=d.get('date'),
collection_name=d.get('collection_name'))
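
The net effect is that each deprecation entry now carries a pre-rendered ``msg`` instead of a bare ``name``, so ``ModuleArgumentSpecValidator`` can emit it verbatim. A minimal sketch using the public validator (the spec is illustrative, and ``_deprecations`` is a private attribute inspected here only for demonstration)::

    from ansible.module_utils.common.arg_spec import ArgumentSpecValidator

    spec = {
        'name': {
            'type': 'str',
            'aliases': ['label'],
            'deprecated_aliases': [
                {'name': 'label', 'version': '9.0.0', 'collection_name': 'ns.col'},
            ],
        },
    }
    result = ArgumentSpecValidator(spec).validate({'label': 'web01'})
    for d in result._deprecations:
        print(d['msg'])  # Alias 'label' is deprecated. See the module docs ...
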
diff --git a/lib/ansible/module_utils/common/parameters.py b/lib/ansible/module_utils/common/parameters.py
index 93b80431..059ca0af 100644
--- a/lib/ansible/module_utils/common/parameters.py
+++ b/lib/ansible/module_utils/common/parameters.py
@@ -154,7 +154,7 @@ def _get_legal_inputs(argument_spec, parameters, aliases=None):
return list(aliases.keys()) + list(argument_spec.keys())
-def _get_unsupported_parameters(argument_spec, parameters, legal_inputs=None, options_context=None):
+def _get_unsupported_parameters(argument_spec, parameters, legal_inputs=None, options_context=None, store_supported=None):
"""Check keys in parameters against those provided in legal_inputs
to ensure they contain legal values. If legal_inputs are not supplied,
they will be generated using the argument_spec.
@@ -182,6 +182,16 @@ def _get_unsupported_parameters(argument_spec, parameters, legal_inputs=None, op
unsupported_parameters.add(context)
+ if store_supported is not None:
+ supported_aliases = _handle_aliases(argument_spec, parameters)
+ supported_params = []
+ for option in legal_inputs:
+ if option in supported_aliases:
+ continue
+ supported_params.append(option)
+
+ store_supported.update({context: (supported_params, supported_aliases)})
+
return unsupported_parameters
@@ -686,7 +696,17 @@ def _validate_argument_values(argument_spec, parameters, options_context=None, e
errors.append(ArgumentTypeError(msg))
-def _validate_sub_spec(argument_spec, parameters, prefix='', options_context=None, errors=None, no_log_values=None, unsupported_parameters=None):
+def _validate_sub_spec(
+ argument_spec,
+ parameters,
+ prefix="",
+ options_context=None,
+ errors=None,
+ no_log_values=None,
+ unsupported_parameters=None,
+ supported_parameters=None,
+ alias_deprecations=None,
+):
"""Validate sub argument spec.
This function is recursive.
@@ -703,6 +723,8 @@ def _validate_sub_spec(argument_spec, parameters, prefix='', options_context=Non
if unsupported_parameters is None:
unsupported_parameters = set()
+ if supported_parameters is None:
+ supported_parameters = dict()
for param, value in argument_spec.items():
wanted = value.get('type')
@@ -740,15 +762,24 @@ def _validate_sub_spec(argument_spec, parameters, prefix='', options_context=Non
new_prefix += '.'
alias_warnings = []
- alias_deprecations = []
+ alias_deprecations_sub = []
try:
- options_aliases = _handle_aliases(sub_spec, sub_parameters, alias_warnings, alias_deprecations)
+ options_aliases = _handle_aliases(sub_spec, sub_parameters, alias_warnings, alias_deprecations_sub)
except (TypeError, ValueError) as e:
options_aliases = {}
errors.append(AliasError(to_native(e)))
for option, alias in alias_warnings:
- warn('Both option %s and its alias %s are set.' % (option, alias))
+ warn('Both option %s%s and its alias %s%s are set.' % (new_prefix, option, new_prefix, alias))
+
+ if alias_deprecations is not None:
+ for deprecation in alias_deprecations_sub:
+ alias_deprecations.append({
+ 'name': '%s%s' % (new_prefix, deprecation['name']),
+ 'version': deprecation.get('version'),
+ 'date': deprecation.get('date'),
+ 'collection_name': deprecation.get('collection_name'),
+ })
try:
no_log_values.update(_list_no_log_values(sub_spec, sub_parameters))
@@ -756,7 +787,15 @@ def _validate_sub_spec(argument_spec, parameters, prefix='', options_context=Non
errors.append(NoLogError(to_native(te)))
legal_inputs = _get_legal_inputs(sub_spec, sub_parameters, options_aliases)
- unsupported_parameters.update(_get_unsupported_parameters(sub_spec, sub_parameters, legal_inputs, options_context))
+ unsupported_parameters.update(
+ _get_unsupported_parameters(
+ sub_spec,
+ sub_parameters,
+ legal_inputs,
+ options_context,
+ store_supported=supported_parameters,
+ )
+ )
try:
check_mutually_exclusive(value.get('mutually_exclusive'), sub_parameters, options_context)
@@ -782,7 +821,9 @@ def _validate_sub_spec(argument_spec, parameters, prefix='', options_context=Non
no_log_values.update(_set_defaults(sub_spec, sub_parameters))
# Handle nested specs
- _validate_sub_spec(sub_spec, sub_parameters, new_prefix, options_context, errors, no_log_values, unsupported_parameters)
+ _validate_sub_spec(
+ sub_spec, sub_parameters, new_prefix, options_context, errors, no_log_values,
+ unsupported_parameters, supported_parameters, alias_deprecations)
options_context.pop()
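
With ``store_supported`` threaded through ``_validate_sub_spec``, an unsupported-suboption error can list the valid names (and aliases) for that exact nesting level instead of the top-level parameter names. A hedged example of the improved message (exact wording may vary)::

    from ansible.module_utils.common.arg_spec import ArgumentSpecValidator

    spec = {
        'proxy': {
            'type': 'dict',
            'options': {
                'host': {'type': 'str'},
                'port': {'type': 'int'},
            },
        },
    }
    result = ArgumentSpecValidator(spec).validate({'proxy': {'hots': 'example'}})
    print(result.error_messages)
    # e.g. ['proxy.hots. Supported parameters include: host, port.']
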
diff --git a/lib/ansible/module_utils/errors.py b/lib/ansible/module_utils/errors.py
index 3274b85b..cbbd86c0 100644
--- a/lib/ansible/module_utils/errors.py
+++ b/lib/ansible/module_utils/errors.py
@@ -75,6 +75,10 @@ class ArgumentValueError(AnsibleValidationError):
"""Error with parameter value"""
+class DeprecationError(AnsibleValidationError):
+ """Error processing parameter deprecations"""
+
+
class ElementError(AnsibleValidationError):
"""Error when validating elements"""
diff --git a/lib/ansible/modules/command.py b/lib/ansible/modules/command.py
index 778d8a26..490c0ca5 100644
--- a/lib/ansible/modules/command.py
+++ b/lib/ansible/modules/command.py
@@ -21,7 +21,7 @@ description:
like C("*"), C("<"), C(">"), C("|"), C(";") and C("&") will not work.
Use the M(ansible.builtin.shell) module if you need these features.
- To create C(command) tasks that are easier to read than the ones using space-delimited
- arguments, pass parameters using the C(args) L(task keyword,../reference_appendices/playbooks_keywords.html#task)
+ arguments, pass parameters using the C(args) L(task keyword,https://docs.ansible.com/ansible/latest/reference_appendices/playbooks_keywords.html#task)
or use C(cmd) parameter.
- Either a free form command or C(cmd) parameter is required, see the examples.
- For Windows targets, use the M(ansible.windows.win_command) module instead.
diff --git a/lib/ansible/modules/file.py b/lib/ansible/modules/file.py
index b691d3d9..72b510c3 100644
--- a/lib/ansible/modules/file.py
+++ b/lib/ansible/modules/file.py
@@ -553,34 +553,38 @@ def execute_touch(path, follow, timestamps):
mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])
- if not module.check_mode:
- if prev_state == 'absent':
- # Create an empty file if the filename did not already exist
- try:
- open(b_path, 'wb').close()
- changed = True
- except (OSError, IOError) as e:
- raise AnsibleModuleError(results={'msg': 'Error, could not touch target: %s'
- % to_native(e, nonstring='simplerepr'),
- 'path': path})
-
- # Update the attributes on the file
- diff = initial_diff(path, 'touch', prev_state)
- file_args = module.load_file_common_arguments(module.params)
+ # If the file did not already exist
+ if prev_state == 'absent':
+ # if we are in check mode and the file is absent
+ # we can set the changed status to True and return
+ if module.check_mode:
+ result['changed'] = True
+ return result
+ # Create an empty file
try:
- changed = module.set_fs_attributes_if_different(file_args, changed, diff, expand=False)
- changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
- except SystemExit as e:
- if e.code: # this is the exit code passed to sys.exit, not a constant -- pylint: disable=using-constant-test
- # We take this to mean that fail_json() was called from
- # somewhere in basic.py
- if prev_state == 'absent':
- # If we just created the file we can safely remove it
- os.remove(b_path)
- raise
-
- result['changed'] = changed
- result['diff'] = diff
+ open(b_path, 'wb').close()
+ changed = True
+ except (OSError, IOError) as e:
+ raise AnsibleModuleError(results={'msg': 'Error, could not touch target: %s'
+ % to_native(e, nonstring='simplerepr'),
+ 'path': path})
+ # Update the attributes on the file
+ diff = initial_diff(path, 'touch', prev_state)
+ file_args = module.load_file_common_arguments(module.params)
+ try:
+ changed = module.set_fs_attributes_if_different(file_args, changed, diff, expand=False)
+ changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
+ except SystemExit as e:
+ if e.code: # this is the exit code passed to sys.exit, not a constant -- pylint: disable=using-constant-test
+ # We take this to mean that fail_json() was called from
+ # somewhere in basic.py
+ if prev_state == 'absent':
+ # If we just created the file we can safely remove it
+ os.remove(b_path)
+ raise
+
+ result['changed'] = changed
+ result['diff'] = diff
return result
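
The refactor turns check mode into an early return when the file is absent, instead of wrapping the whole body in ``if not module.check_mode``. A toy sketch of the pattern (not the real module code; attribute and timestamp handling is elided)::

    import os

    def touch(path, check_mode=False):
        if not os.path.exists(path):
            if check_mode:
                return {'changed': True}  # report the would-be creation, touch nothing
            open(path, 'wb').close()      # actually create the empty file
            return {'changed': True}
        return {'changed': False}         # the real module also updates timestamps here

    print(touch('/tmp/example.touch', check_mode=True))
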
diff --git a/lib/ansible/modules/get_url.py b/lib/ansible/modules/get_url.py
index 4cf27159..eec24241 100644
--- a/lib/ansible/modules/get_url.py
+++ b/lib/ansible/modules/get_url.py
@@ -609,7 +609,7 @@ def main():
start = datetime.datetime.utcnow()
method = 'HEAD' if module.check_mode else 'GET'
tmpsrc, info = url_get(module, url, dest, use_proxy, last_mod_time, force, timeout, headers, tmp_dest, method,
- unredirected_headers=unredirected_headers, decompress=decompress, use_netrc=use_netrc)
+ unredirected_headers=unredirected_headers, decompress=decompress, ciphers=ciphers, use_netrc=use_netrc)
result['elapsed'] = (datetime.datetime.utcnow() - start).seconds
result['src'] = tmpsrc
diff --git a/lib/ansible/modules/setup.py b/lib/ansible/modules/setup.py
index 2776ac1c..df2a67f2 100644
--- a/lib/ansible/modules/setup.py
+++ b/lib/ansible/modules/setup.py
@@ -133,7 +133,7 @@ EXAMPLES = r"""
- '!<any valid subset>'
- facter
-- name: Collect only selected facts
+- name: Filter and return only selected facts
ansible.builtin.setup:
filter:
- 'ansible_distribution'
diff --git a/lib/ansible/modules/systemd.py b/lib/ansible/modules/systemd.py
index 4cd323b9..3580fa5e 100644
--- a/lib/ansible/modules/systemd.py
+++ b/lib/ansible/modules/systemd.py
@@ -108,7 +108,7 @@ EXAMPLES = '''
- name: Restart service cron on centos, in all cases, also issue daemon-reload to pick up config changes
ansible.builtin.systemd:
state: restarted
- daemon_reload: yes
+ daemon_reload: true
name: crond
- name: Reload service httpd, in all cases
@@ -119,22 +119,22 @@ EXAMPLES = '''
- name: Enable service httpd and ensure it is not masked
ansible.builtin.systemd:
name: httpd
- enabled: yes
+ enabled: true
masked: no
- name: Enable a timer unit for dnf-automatic
ansible.builtin.systemd:
name: dnf-automatic.timer
state: started
- enabled: yes
+ enabled: true
- name: Just force systemd to reread configs (2.4 and above)
ansible.builtin.systemd:
- daemon_reload: yes
+ daemon_reload: true
- name: Just force systemd to re-execute itself (2.8 and above)
ansible.builtin.systemd:
- daemon_reexec: yes
+ daemon_reexec: true
- name: Run a user service when XDG_RUNTIME_DIR is not set on remote login
ansible.builtin.systemd:
diff --git a/lib/ansible/modules/systemd_service.py b/lib/ansible/modules/systemd_service.py
index 4cd323b9..3580fa5e 100644
--- a/lib/ansible/modules/systemd_service.py
+++ b/lib/ansible/modules/systemd_service.py
@@ -108,7 +108,7 @@ EXAMPLES = '''
- name: Restart service cron on centos, in all cases, also issue daemon-reload to pick up config changes
ansible.builtin.systemd:
state: restarted
- daemon_reload: yes
+ daemon_reload: true
name: crond
- name: Reload service httpd, in all cases
@@ -119,22 +119,22 @@ EXAMPLES = '''
- name: Enable service httpd and ensure it is not masked
ansible.builtin.systemd:
name: httpd
- enabled: yes
+ enabled: true
masked: no
- name: Enable a timer unit for dnf-automatic
ansible.builtin.systemd:
name: dnf-automatic.timer
state: started
- enabled: yes
+ enabled: true
- name: Just force systemd to reread configs (2.4 and above)
ansible.builtin.systemd:
- daemon_reload: yes
+ daemon_reload: true
- name: Just force systemd to re-execute itself (2.8 and above)
ansible.builtin.systemd:
- daemon_reexec: yes
+ daemon_reexec: true
- name: Run a user service when XDG_RUNTIME_DIR is not set on remote login
ansible.builtin.systemd:
diff --git a/lib/ansible/modules/uri.py b/lib/ansible/modules/uri.py
index 958aefc9..c0021364 100644
--- a/lib/ansible/modules/uri.py
+++ b/lib/ansible/modules/uri.py
@@ -447,7 +447,7 @@ from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.common._collections_compat import Mapping, Sequence
from ansible.module_utils.urls import fetch_url, get_response_filename, parse_content_type, prepare_multipart, url_argument_spec
-JSON_CANDIDATES = ('text', 'json', 'javascript')
+JSON_CANDIDATES = {'json', 'javascript'}
# List of response key names we do not want sanitize_keys() to change.
NO_MODIFY_KEYS = frozenset(
@@ -695,11 +695,11 @@ def main():
content_type, main_type, sub_type, content_encoding = parse_content_type(r)
else:
content_type = 'application/octet-stream'
- main_type = 'aplication'
+ main_type = 'application'
sub_type = 'octet-stream'
content_encoding = 'utf-8'
- maybe_json = content_type and any(candidate in sub_type for candidate in JSON_CANDIDATES)
+ maybe_json = content_type and sub_type.lower() in JSON_CANDIDATES
maybe_output = maybe_json or return_content or info['status'] not in status_code
if maybe_output:
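
The switch from a substring scan to exact set membership tightens which content types are treated as JSON. A small behavioral demonstration::

    JSON_CANDIDATES = {'json', 'javascript'}

    def new_maybe_json(sub_type):
        # exact, case-insensitive match on the MIME sub-type
        return sub_type.lower() in JSON_CANDIDATES

    def old_maybe_json(sub_type):
        return any(c in sub_type for c in ('text', 'json', 'javascript'))

    print(new_maybe_json('json'), old_maybe_json('json'))            # True True
    print(new_maybe_json('hal+json'), old_maybe_json('hal+json'))    # False True
    print(new_maybe_json('plaintext'), old_maybe_json('plaintext'))  # False True
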
diff --git a/lib/ansible/plugins/action/template.py b/lib/ansible/plugins/action/template.py
index 2e3d3641..d2b3df9a 100644
--- a/lib/ansible/plugins/action/template.py
+++ b/lib/ansible/plugins/action/template.py
@@ -118,7 +118,11 @@ class ActionModule(ActionBase):
searchpath = newsearchpath
# add ansible 'template' vars
- temp_vars = task_vars | generate_ansible_template_vars(self._task.args.get('src', None), source, dest)
+ temp_vars = task_vars.copy()
+ # NOTE in the case of ANSIBLE_DEBUG=1 task_vars is VarsWithSources(MutableMapping)
+ # so the | operator cannot be used, since it is defined only for dicts
+ # https://peps.python.org/pep-0584/#what-about-mapping-and-mutablemapping
+ temp_vars.update(generate_ansible_template_vars(self._task.args.get('src', None), source, dest))
# force templar to use AnsibleEnvironment to prevent issues with native types
# https://github.com/ansible/ansible/issues/46169
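
For context: PEP 584 defines ``|`` only for ``dict``, so a ``MutableMapping`` such as the debug-mode ``VarsWithSources`` wrapper does not get it for free, hence the ``copy()``/``update()`` pattern. A self-contained sketch of the failure mode (the class below is a toy stand-in, not ansible's real wrapper)::

    from collections.abc import MutableMapping

    class VarsWithSources(MutableMapping):
        def __init__(self, data):
            self._data = dict(data)
        def __getitem__(self, key): return self._data[key]
        def __setitem__(self, key, value): self._data[key] = value
        def __delitem__(self, key): del self._data[key]
        def __iter__(self): return iter(self._data)
        def __len__(self): return len(self._data)
        def copy(self): return VarsWithSources(self._data)

    task_vars = VarsWithSources({'a': 1})
    temp_vars = task_vars.copy()
    temp_vars.update({'b': 2})  # works for any mutable mapping
    # task_vars | {'b': 2}      # TypeError: | is not defined for this type
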
diff --git a/lib/ansible/plugins/action/validate_argument_spec.py b/lib/ansible/plugins/action/validate_argument_spec.py
index 8c4432d1..dc7d6cb3 100644
--- a/lib/ansible/plugins/action/validate_argument_spec.py
+++ b/lib/ansible/plugins/action/validate_argument_spec.py
@@ -79,7 +79,7 @@ class ActionModule(ActionBase):
args_from_vars = self.get_args_from_task_vars(argument_spec_data, task_vars)
validator = ArgumentSpecValidator(argument_spec_data)
- validation_result = validator.validate(combine_vars(args_from_vars, provided_arguments))
+ validation_result = validator.validate(combine_vars(args_from_vars, provided_arguments), validate_role_argument_spec=True)
if validation_result.error_messages:
result['failed'] = True
diff --git a/lib/ansible/plugins/connection/local.py b/lib/ansible/plugins/connection/local.py
index 182e21cd..27afd105 100644
--- a/lib/ansible/plugins/connection/local.py
+++ b/lib/ansible/plugins/connection/local.py
@@ -18,12 +18,12 @@ DOCUMENTATION = '''
- The remote user is ignored, the user with which the ansible CLI was executed is used instead.
'''
+import fcntl
+import getpass
import os
import pty
import shutil
import subprocess
-import fcntl
-import getpass
import ansible.constants as C
from ansible.errors import AnsibleError, AnsibleFileNotFound
@@ -47,7 +47,11 @@ class Connection(ConnectionBase):
super(Connection, self).__init__(*args, **kwargs)
self.cwd = None
- self.default_user = getpass.getuser()
+ try:
+ self.default_user = getpass.getuser()
+ except KeyError:
+ display.vv("Current user (uid=%s) does not seem to exist on this system, leaving user empty." % os.getuid())
+ self.default_user = ""
def _connect(self):
''' connect to the local host; nothing to do here '''
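
``getpass.getuser()`` falls back to the password database when the ``LOGNAME``, ``USER``, ``LNAME``, and ``USERNAME`` environment variables are all unset, and raises ``KeyError`` when the current uid has no passwd entry (common in stripped-down containers). The guarded pattern in isolation::

    import getpass
    import os

    try:
        default_user = getpass.getuser()
    except KeyError:
        # the current uid has no entry in the user database; proceed with an empty user
        print("uid=%s does not exist on this system" % os.getuid())
        default_user = ""
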
diff --git a/lib/ansible/plugins/doc_fragments/validate.py b/lib/ansible/plugins/doc_fragments/validate.py
index f77514d2..ac66d253 100644
--- a/lib/ansible/plugins/doc_fragments/validate.py
+++ b/lib/ansible/plugins/doc_fragments/validate.py
@@ -15,7 +15,7 @@ options:
- The validation command to run before copying the updated file into the final destination.
- A temporary file path is used to validate, passed in through '%s' which must be present as in the examples below.
- Also, the command is passed securely so shell features such as expansion and pipes will not work.
- - For an example on how to handle more complex validation than what this option provides,
- see L(Complex configuration validation,https://docs.ansible.com/ansible/devel/reference_appendices/faq.html).
+ - For an example on how to handle more complex validation than what this
+ option provides, see R(handling complex validation,complex_configuration_validation).
type: str
'''
diff --git a/lib/ansible/plugins/filter/product.yml b/lib/ansible/plugins/filter/product.yml
index c558e4e4..50355228 100644
--- a/lib/ansible/plugins/filter/product.yml
+++ b/lib/ansible/plugins/filter/product.yml
@@ -30,7 +30,7 @@ EXAMPLES: |
repeat_original: "{{ [1,2] | product(repeat=2) }}"
# repeat_product => [ [ 1, "a", 1, "a" ], [ 1, "a", 1, "b" ], [ 1, "a", 2, "a" ], [ 1, "a", 2, "b" ], [ 1, "b", 1, "a" ], [ 1, "b", 1, "b" ], [ 1, "b", 2, "a" ], [ 1, "b", 2, "b" ], [ 2, "a", 1, "a" ], [ 2, "a", 1, "b" ], [ 2, "a", 2, "a" ], [ 2, "a", 2, "b" ], [ 2, "b", 1, "a" ], [ 2, "b", 1, "b" ], [ 2, "b", 2, "a" ], [ 2, "b", 2, "b" ] ]
- repeat_product: "{{ [1,2] | product(['a', 'b']) }}"
+ repeat_product: "{{ [1,2] | product(['a', 'b'], repeat=2) }}"
# domains => [ 'example.com', 'ansible.com', 'redhat.com' ]
domains: "{{ [ 'example', 'ansible', 'redhat'] | product(['com']) | map('join', '.') }}"
diff --git a/lib/ansible/plugins/filter/regex_replace.yml b/lib/ansible/plugins/filter/regex_replace.yml
index 47c2eb3b..0277b560 100644
--- a/lib/ansible/plugins/filter/regex_replace.yml
+++ b/lib/ansible/plugins/filter/regex_replace.yml
@@ -5,7 +5,7 @@ DOCUMENTATION:
description:
- Replace a substring defined by a regular expression with another defined by another regular expression based on the first match.
notes:
- - Maps to Python's C(regex.replace).
+ - Maps to Python's C(re.replace).
positional: _input, _regex_match, _regex_replace
options:
_input:
diff --git a/lib/ansible/plugins/filter/regex_search.yml b/lib/ansible/plugins/filter/regex_search.yml
index b459c936..c61efb76 100644
--- a/lib/ansible/plugins/filter/regex_search.yml
+++ b/lib/ansible/plugins/filter/regex_search.yml
@@ -5,7 +5,7 @@ DOCUMENTATION:
description:
- Search in a string to extract the part that matches the regular expression.
notes:
- - Maps to Python's C(regex.search).
+ - Maps to Python's C(re.search).
positional: _input, _regex
options:
_input:
diff --git a/lib/ansible/plugins/list.py b/lib/ansible/plugins/list.py
index 075225f8..e09b293f 100644
--- a/lib/ansible/plugins/list.py
+++ b/lib/ansible/plugins/list.py
@@ -112,10 +112,7 @@ def _list_plugins_from_paths(ptype, dirs, collection, depth=0):
def _list_j2_plugins_from_file(collection, plugin_path, ptype, plugin_name):
ploader = getattr(loader, '{0}_loader'.format(ptype))
- if collection in ('ansible.builtin', 'ansible.legacy'):
- file_plugins = ploader.all()
- else:
- file_plugins = ploader.get_contained_plugins(collection, plugin_path, plugin_name)
+ file_plugins = ploader.get_contained_plugins(collection, plugin_path, plugin_name)
return file_plugins
diff --git a/lib/ansible/plugins/loader.py b/lib/ansible/plugins/loader.py
index d09138b1..845fdcd0 100644
--- a/lib/ansible/plugins/loader.py
+++ b/lib/ansible/plugins/loader.py
@@ -1389,7 +1389,8 @@ def _load_plugin_filter():
# Modules and action plugins share the same blacklist since the difference between the
# two isn't visible to the users
try:
- filters['ansible.modules'] = frozenset(filter_data['module_blacklist'])
+ # the reject list key was documented but the code still read the blacklist key; the old key will be deprecated in 2.15
+ filters['ansible.modules'] = frozenset(filter_data.get('module_rejectlist', filter_data['module_blacklist']))
except TypeError:
display.warning(u'Unable to parse the plugin filter file {0} as'
u' module_blacklist is not a list.'
diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py
index 2f04a3f7..e3c4b02d 100644
--- a/lib/ansible/plugins/strategy/__init__.py
+++ b/lib/ansible/plugins/strategy/__init__.py
@@ -551,6 +551,8 @@ class StrategyBase:
role_ran = True
ignore_errors = original_task.ignore_errors
if not ignore_errors:
+ # save the current state before failing it for later inspection
+ state_when_failed = iterator.get_state_for_host(original_host.name)
display.debug("marking %s as failed" % original_host.name)
if original_task.run_once:
# if we're using run_once, we have to fail every host here
@@ -568,7 +570,7 @@ class StrategyBase:
# if we're iterating on the rescue portion of a block then
# we save the failed task in a special var for use
# within the rescue/always
- if iterator.is_any_block_rescuing(state):
+ if iterator.is_any_block_rescuing(state_when_failed):
self._tqm._stats.increment('rescued', original_host.name)
iterator._play._removed_hosts.remove(original_host.name)
self._variable_manager.set_nonpersistent_facts(
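The ordering matters because fail_host() mutates the very state that is_any_block_rescuing() later inspects; a toy sketch (all names hypothetical) of the bug this hunk fixes:

    class ToyIterator:
        """Stand-in for the play iterator; only models the state mutation on failure."""

        def __init__(self):
            self.state = 'in_rescue'

        def get_state_for_host(self, host):
            return self.state

        def fail_host(self, host):
            self.state = 'failed'  # failing the host rewrites its state

        def is_any_block_rescuing(self, state):
            return state == 'in_rescue'

    it = ToyIterator()
    state_when_failed = it.get_state_for_host('h1')  # capture before failing (the fix)
    it.fail_host('h1')
    assert it.is_any_block_rescuing(state_when_failed)                # rescue is detected
    assert not it.is_any_block_rescuing(it.get_state_for_host('h1'))  # the pre-fix check missed it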
diff --git a/lib/ansible/release.py b/lib/ansible/release.py
index a38538f5..7a2e435b 100644
--- a/lib/ansible/release.py
+++ b/lib/ansible/release.py
@@ -19,6 +19,6 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-__version__ = '2.14.1'
+__version__ = '2.14.2'
__author__ = 'Ansible, Inc.'
__codename__ = "C'mon Everybody"
diff --git a/lib/ansible_core.egg-info/PKG-INFO b/lib/ansible_core.egg-info/PKG-INFO
index 373b0ae4..b3beba17 100644
--- a/lib/ansible_core.egg-info/PKG-INFO
+++ b/lib/ansible_core.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
Metadata-Version: 2.1
Name: ansible-core
-Version: 2.14.1
+Version: 2.14.2
Summary: Radically simple IT automation
Home-page: https://ansible.com/
Author: Ansible, Inc.
@@ -29,6 +29,7 @@ Classifier: Topic :: System :: Installation/Setup
Classifier: Topic :: System :: Systems Administration
Classifier: Topic :: Utilities
Requires-Python: >=3.9
+Description-Content-Type: text/x-rst
License-File: COPYING
|PyPI version| |Docs badge| |Chat badge| |Build Status| |Code Of Conduct| |Mailing Lists| |License| |CII Best Practices|
diff --git a/lib/ansible_core.egg-info/SOURCES.txt b/lib/ansible_core.egg-info/SOURCES.txt
index e97472c7..54f63989 100644
--- a/lib/ansible_core.egg-info/SOURCES.txt
+++ b/lib/ansible_core.egg-info/SOURCES.txt
@@ -27,16 +27,9 @@ docs/docsite/.nojekyll
docs/docsite/Makefile
docs/docsite/Makefile.sphinx
docs/docsite/README.md
-docs/docsite/ansible_2_10.inv
-docs/docsite/ansible_2_5.inv
-docs/docsite/ansible_2_6.inv
-docs/docsite/ansible_2_7.inv
-docs/docsite/ansible_2_8.inv
docs/docsite/ansible_2_9.inv
-docs/docsite/ansible_3.inv
-docs/docsite/ansible_4.inv
-docs/docsite/ansible_5.inv
docs/docsite/ansible_6.inv
+docs/docsite/ansible_7.inv
docs/docsite/collection-plugins.yml
docs/docsite/jinja2.inv
docs/docsite/known_good_reqs.txt
@@ -140,12 +133,12 @@ docs/docsite/rst/dev_guide/developing_rebasing.rst
docs/docsite/rst/dev_guide/migrating_roles.rst
docs/docsite/rst/dev_guide/module_lifecycle.rst
docs/docsite/rst/dev_guide/overview_architecture.rst
+docs/docsite/rst/dev_guide/sidecar.rst
docs/docsite/rst/dev_guide/testing.rst
docs/docsite/rst/dev_guide/testing_compile.rst
docs/docsite/rst/dev_guide/testing_documentation.rst
docs/docsite/rst/dev_guide/testing_httptester.rst
docs/docsite/rst/dev_guide/testing_integration.rst
-docs/docsite/rst/dev_guide/testing_integration_legacy.rst
docs/docsite/rst/dev_guide/testing_pep8.rst
docs/docsite/rst/dev_guide/testing_running_locally.rst
docs/docsite/rst/dev_guide/testing_sanity.rst
@@ -1602,6 +1595,9 @@ test/integration/targets/ansible-test-config-invalid/ansible_collections/ns/col/
test/integration/targets/ansible-test-config/ansible_collections/ns/col/plugins/module_utils/test.py
test/integration/targets/ansible-test-config/ansible_collections/ns/col/tests/config.yml
test/integration/targets/ansible-test-config/ansible_collections/ns/col/tests/unit/plugins/module_utils/test_test.py
+test/integration/targets/ansible-test-container/aliases
+test/integration/targets/ansible-test-container/runme.py
+test/integration/targets/ansible-test-container/runme.sh
test/integration/targets/ansible-test-coverage/aliases
test/integration/targets/ansible-test-coverage/runme.sh
test/integration/targets/ansible-test-coverage/ansible_collections/ns/col/plugins/module_utils/test_util.py
@@ -1684,14 +1680,24 @@ test/integration/targets/ansible-test-sanity-shebang/ansible_collections/ns/col/
test/integration/targets/ansible-test-sanity-shebang/ansible_collections/ns/col/tests/integration/targets/valid/env_python.py
test/integration/targets/ansible-test-sanity-shebang/ansible_collections/ns/col/tests/integration/targets/valid/sh.sh
test/integration/targets/ansible-test-sanity-validate-modules/aliases
+test/integration/targets/ansible-test-sanity-validate-modules/expected.txt
test/integration/targets/ansible-test-sanity-validate-modules/runme.sh
+test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/col/plugins/modules/invalid_yaml_syntax.py
test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/col/plugins/modules/no_callable.py
test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/col/plugins/modules/sidecar.py
test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/col/plugins/modules/sidecar.yaml
+test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/failure/README.rst
+test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/failure/galaxy.yml
+test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/failure/meta/main.yml
+test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/failure/plugins/modules/failure_ps.ps1
+test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/failure/plugins/modules/failure_ps.yml
test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/ps_only/README.rst
test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/ps_only/galaxy.yml
test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/ps_only/meta/runtime.yml
+test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/ps_only/plugins/module_utils/share_module.psm1
test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/ps_only/plugins/module_utils/validate.psm1
+test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/ps_only/plugins/modules/in_function.ps1
+test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/ps_only/plugins/modules/in_function.yml
test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/ps_only/plugins/modules/sidecar.ps1
test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/ps_only/plugins/modules/sidecar.yml
test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/ps_only/plugins/modules/validate.ps1
@@ -1921,6 +1927,7 @@ test/integration/targets/blocks/69848.yml
test/integration/targets/blocks/72725.yml
test/integration/targets/blocks/72781.yml
test/integration/targets/blocks/78612.yml
+test/integration/targets/blocks/79711.yml
test/integration/targets/blocks/aliases
test/integration/targets/blocks/always_failure_no_rescue_rc.yml
test/integration/targets/blocks/always_failure_with_rescue_rc.yml
@@ -4253,9 +4260,11 @@ test/integration/targets/var_reserved/aliases
test/integration/targets/var_reserved/reserved_varname_warning.yml
test/integration/targets/var_reserved/runme.sh
test/integration/targets/var_templating/aliases
+test/integration/targets/var_templating/ansible_debug_template.j2
test/integration/targets/var_templating/runme.sh
test/integration/targets/var_templating/task_vars_templating.yml
test/integration/targets/var_templating/test_connection_vars.yml
+test/integration/targets/var_templating/test_vars_with_sources.yml
test/integration/targets/var_templating/undall.yml
test/integration/targets/var_templating/undefined.yml
test/integration/targets/var_templating/group_vars/all.yml
@@ -4412,6 +4421,7 @@ test/lib/ansible_test/_internal/ansible_util.py
test/lib/ansible_test/_internal/become.py
test/lib/ansible_test/_internal/bootstrap.py
test/lib/ansible_test/_internal/cache.py
+test/lib/ansible_test/_internal/cgroup.py
test/lib/ansible_test/_internal/completion.py
test/lib/ansible_test/_internal/config.py
test/lib/ansible_test/_internal/connections.py
@@ -4552,6 +4562,8 @@ test/lib/ansible_test/_internal/commands/units/__init__.py
test/lib/ansible_test/_internal/compat/__init__.py
test/lib/ansible_test/_internal/compat/packaging.py
test/lib/ansible_test/_internal/compat/yaml.py
+test/lib/ansible_test/_internal/dev/__init__.py
+test/lib/ansible_test/_internal/dev/container_probe.py
test/lib/ansible_test/_internal/provider/__init__.py
test/lib/ansible_test/_internal/provider/layout/__init__.py
test/lib/ansible_test/_internal/provider/layout/ansible.py
@@ -4651,6 +4663,8 @@ test/lib/ansible_test/_util/target/sanity/compile/compile.py
test/lib/ansible_test/_util/target/sanity/import/importer.py
test/lib/ansible_test/_util/target/setup/ConfigureRemotingForAnsible.ps1
test/lib/ansible_test/_util/target/setup/bootstrap.sh
+test/lib/ansible_test/_util/target/setup/check_systemd_cgroup_v1.sh
+test/lib/ansible_test/_util/target/setup/probe_cgroups.py
test/lib/ansible_test/_util/target/setup/quiet_pip.py
test/lib/ansible_test/_util/target/setup/requirements.py
test/lib/ansible_test/_util/target/tools/virtualenvcheck.py
diff --git a/setup.cfg b/setup.cfg
index 55921e3c..b2d2fd33 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -3,6 +3,7 @@ name = ansible-core
version = attr: ansible.release.__version__
description = Radically simple IT automation
long_description = file: README.rst
+long_description_content_type = text/x-rst
author = Ansible, Inc.
author_email = info@ansible.com
url = https://ansible.com/
diff --git a/test/integration/targets/ansible-test-container/aliases b/test/integration/targets/ansible-test-container/aliases
new file mode 100644
index 00000000..65a05093
--- /dev/null
+++ b/test/integration/targets/ansible-test-container/aliases
@@ -0,0 +1,5 @@
+shippable/posix/group6
+context/controller
+needs/root
+destructive
+retry/never # tests on some platforms run too long to make retries useful
diff --git a/test/integration/targets/ansible-test-container/runme.py b/test/integration/targets/ansible-test-container/runme.py
new file mode 100755
index 00000000..68712805
--- /dev/null
+++ b/test/integration/targets/ansible-test-container/runme.py
@@ -0,0 +1,1090 @@
+#!/usr/bin/env python
+"""Test suite used to verify ansible-test is able to run its containers on various container hosts."""
+
+from __future__ import annotations
+
+import abc
+import dataclasses
+import datetime
+import errno
+import functools
+import json
+import os
+import pathlib
+import pwd
+import re
+import secrets
+import shlex
+import shutil
+import signal
+import subprocess
+import sys
+import time
+import typing as t
+
+UNPRIVILEGED_USER_NAME = 'ansible-test'
+CGROUP_SYSTEMD = pathlib.Path('/sys/fs/cgroup/systemd')
+LOG_PATH = pathlib.Path('/tmp/results')
+
+# The value of /proc/*/loginuid when it is not set.
+# It is a reserved UID, which is the maximum 32-bit unsigned integer value.
+# See: https://access.redhat.com/solutions/25404
+LOGINUID_NOT_SET = 4294967295
+
+UID = os.getuid()
+
+try:
+ LOGINUID = int(pathlib.Path('/proc/self/loginuid').read_text())
+ LOGINUID_MISMATCH = LOGINUID != LOGINUID_NOT_SET and LOGINUID != UID
+except FileNotFoundError:
+ LOGINUID = None
+ LOGINUID_MISMATCH = False
+
+
+def main() -> None:
+ """Main program entry point."""
+ display.section('Startup check')
+
+ try:
+ bootstrap_type = pathlib.Path('/etc/ansible-test.bootstrap').read_text().strip()
+ except FileNotFoundError:
+ bootstrap_type = 'undefined'
+
+ display.info(f'Bootstrap type: {bootstrap_type}')
+
+ if bootstrap_type != 'remote':
+ display.warning('Skipping destructive test on system which is not an ansible-test remote provisioned instance.')
+ return
+
+ display.info(f'UID: {UID} / {LOGINUID}')
+
+ if UID != 0:
+ raise Exception('This test must be run as root.')
+
+ if not LOGINUID_MISMATCH:
+ if LOGINUID is None:
+ display.warning('Tests involving loginuid mismatch will be skipped on this host since it does not have audit support.')
+ elif LOGINUID == LOGINUID_NOT_SET:
+ display.warning('Tests involving loginuid mismatch will be skipped on this host since it is not set.')
+ elif LOGINUID == 0:
+ raise Exception('Use sudo, su, etc. as a non-root user to become root before running this test.')
+ else:
+ raise Exception(f'Unexpected loginuid: {LOGINUID}')
+
+ display.section(f'Bootstrapping {os_release}')
+
+ bootstrapper = Bootstrapper.init()
+ bootstrapper.run()
+
+ result_dir = LOG_PATH
+
+ if result_dir.exists():
+ shutil.rmtree(result_dir)
+
+ result_dir.mkdir()
+ result_dir.chmod(0o777)
+
+ scenarios = get_test_scenarios()
+ results = [run_test(scenario) for scenario in scenarios]
+ error_total = 0
+
+ for name in sorted(result_dir.glob('*.log')):
+ lines = name.read_text().strip().splitlines()
+ error_count = len([line for line in lines if line.startswith('FAIL: ')])
+ error_total += error_count
+
+ display.section(f'Log ({error_count=}/{len(lines)}): {name.name}')
+
+ for line in lines:
+ if line.startswith('FAIL: '):
+ display.show(line, display.RED)
+ else:
+ display.show(line)
+
+ error_count = len([result for result in results if result.message])
+ error_total += error_count
+
+ duration = datetime.timedelta(seconds=int(sum(result.duration.total_seconds() for result in results)))
+
+ display.section(f'Test Results ({error_count=}/{len(results)}) [{duration}]')
+
+ for result in results:
+ notes = f' <cleanup: {", ".join(result.cleanup)}>' if result.cleanup else ''
+
+ if result.cgroup_dirs:
+ notes += f' <cgroup_dirs: {len(result.cgroup_dirs)}>'
+
+ notes += f' [{result.duration}]'
+
+ if result.message:
+ display.show(f'FAIL: {result.scenario} {result.message}{notes}', display.RED)
+ elif result.duration.total_seconds() >= 90:
+ display.show(f'SLOW: {result.scenario}{notes}', display.YELLOW)
+ else:
+ display.show(f'PASS: {result.scenario}{notes}')
+
+ if error_total:
+ sys.exit(1)
+
+
+def get_test_scenarios() -> list[TestScenario]:
+ """Generate and return a list of test scenarios."""
+
+ supported_engines = ('docker', 'podman')
+ available_engines = [engine for engine in supported_engines if shutil.which(engine)]
+
+ if not available_engines:
+ raise ApplicationError(f'No supported container engines found: {", ".join(supported_engines)}')
+
+ completion_lines = pathlib.Path(os.environ['PYTHONPATH'], '../test/lib/ansible_test/_data/completion/docker.txt').read_text().splitlines()
+
+ # TODO: consider including testing for the collection default image
+ entries = {name: value for name, value in (parse_completion_entry(line) for line in completion_lines) if name != 'default'}
+
+ unprivileged_user = User.get(UNPRIVILEGED_USER_NAME)
+
+ scenarios: list[TestScenario] = []
+
+ for container_name, settings in entries.items():
+ image = settings['image']
+ cgroup = settings.get('cgroup', 'v1-v2')
+
+ if container_name == 'centos6' and os_release.id == 'alpine':
+ # Alpine kernels do not emulate vsyscall by default, which causes the centos6 container to fail during init.
+ # See: https://unix.stackexchange.com/questions/478387/running-a-centos-docker-image-on-arch-linux-exits-with-code-139
+ # Other distributions enable settings which trap vsyscall by default.
+ # See: https://www.kernelconfig.io/config_legacy_vsyscall_xonly
+ # See: https://www.kernelconfig.io/config_legacy_vsyscall_emulate
+ continue
+
+ for engine in available_engines:
+ # TODO: figure out how to get tests passing using docker without disabling selinux
+ disable_selinux = os_release.id == 'fedora' and engine == 'docker' and cgroup != 'none'
+ expose_cgroup_v1 = cgroup == 'v1-only' and get_docker_info(engine).cgroup_version != 1
+ debug_systemd = cgroup != 'none'
+
+ # The sleep+pkill used to support the cgroup probe causes problems with the centos6 container.
+ # It results in sshd connections being refused or reset for many, but not all, container instances.
+ # The underlying cause of this issue is unknown.
+ probe_cgroups = container_name != 'centos6'
+
+ # The default RHEL 9 crypto policy prevents use of SHA-1.
+ # This results in SSH errors with centos6 containers: ssh_dispatch_run_fatal: Connection to 1.2.3.4 port 22: error in libcrypto
+ # See: https://access.redhat.com/solutions/6816771
+ enable_sha1 = os_release.id == 'rhel' and os_release.version_id.startswith('9.') and container_name == 'centos6'
+
+ if cgroup != 'none' and get_docker_info(engine).cgroup_version == 1 and not have_cgroup_systemd():
+ expose_cgroup_v1 = True # the host uses cgroup v1 but there is no systemd cgroup and the container requires cgroup support
+
+ user_scenarios = [
+ # TODO: test rootless docker
+ UserScenario(ssh=unprivileged_user),
+ ]
+
+ if engine == 'podman':
+ user_scenarios.append(UserScenario(ssh=ROOT_USER))
+
+ # TODO: test podman remote on Alpine and Ubuntu hosts
+ # TODO: combine remote with ssh using different unprivileged users
+ if os_release.id not in ('alpine', 'ubuntu'):
+ user_scenarios.append(UserScenario(remote=unprivileged_user))
+
+ if LOGINUID_MISMATCH:
+ user_scenarios.append(UserScenario())
+
+ for user_scenario in user_scenarios:
+ scenarios.append(
+ TestScenario(
+ user_scenario=user_scenario,
+ engine=engine,
+ container_name=container_name,
+ image=image,
+ disable_selinux=disable_selinux,
+ expose_cgroup_v1=expose_cgroup_v1,
+ enable_sha1=enable_sha1,
+ debug_systemd=debug_systemd,
+ probe_cgroups=probe_cgroups,
+ )
+ )
+
+ return scenarios
+
+
+def run_test(scenario: TestScenario) -> TestResult:
+ """Run a test scenario and return the test results."""
+ display.section(f'Testing {scenario} Started')
+
+ start = time.monotonic()
+
+ integration = ['ansible-test', 'integration', 'split']
+ integration_options = ['--target', f'docker:{scenario.container_name}', '--color', '--truncate', '0', '-v']
+ target_only_options = []
+
+ if scenario.debug_systemd:
+ integration_options.append('--dev-systemd-debug')
+
+ if scenario.probe_cgroups:
+ target_only_options = ['--dev-probe-cgroups', str(LOG_PATH)]
+
+ commands = [
+ # The cgroup probe is only performed for the first test of the target.
+ # There's no need to repeat the probe again for the same target.
+ # The controller will be tested separately as a target.
+ # This ensures that both the probe and no-probe code paths are functional.
+ [*integration, *integration_options, *target_only_options],
+ # For the split test we'll use alpine3 as the controller. There are two reasons for this:
+ # 1) It doesn't require the cgroup v1 hack, so we can test a target that doesn't need that.
+ # 2) It doesn't require disabling selinux, so we can test a target that doesn't need that.
+ [*integration, '--controller', 'docker:alpine3', *integration_options],
+ ]
+
+ common_env: dict[str, str] = {}
+ test_env: dict[str, str] = {}
+
+ if scenario.engine == 'podman':
+ if scenario.user_scenario.remote:
+ common_env.update(
+ # Podman 4.3.0 has a regression which requires a port for remote connections to work.
+ # See: https://github.com/containers/podman/issues/16509
+ CONTAINER_HOST=f'ssh://{scenario.user_scenario.remote.name}@localhost:22'
+ f'/run/user/{scenario.user_scenario.remote.pwnam.pw_uid}/podman/podman.sock',
+ CONTAINER_SSHKEY=str(pathlib.Path('~/.ssh/id_rsa').expanduser()), # TODO: add support for ssh + remote when the ssh user is not root
+ )
+
+ test_env.update(ANSIBLE_TEST_PREFER_PODMAN='1')
+
+ test_env.update(common_env)
+
+ if scenario.user_scenario.ssh:
+ client_become_cmd = ['ssh', f'{scenario.user_scenario.ssh.name}@localhost']
+ test_commands = [client_become_cmd + [f'cd ~/ansible; {format_env(test_env)}{sys.executable} bin/{shlex.join(command)}'] for command in commands]
+ else:
+ client_become_cmd = ['sh', '-c']
+ test_commands = [client_become_cmd + [f'{format_env(test_env)}{shlex.join(command)}'] for command in commands]
+
+ prime_storage_command = []
+
+ if scenario.engine == 'podman' and scenario.user_scenario.actual.name == UNPRIVILEGED_USER_NAME:
+ # When testing podman we need to make sure that the overlay filesystem is used instead of vfs.
+ # Using the vfs filesystem will result in running out of disk space during the tests.
+ # To change the filesystem used, the existing storage directory must be removed before "priming" the storage database.
+ #
+ # Without this change the following message may be displayed:
+ #
+ # User-selected graph driver "overlay" overwritten by graph driver "vfs" from database - delete libpod local files to resolve
+ #
+ # However, with this change it may be replaced with the following message:
+ #
+ # User-selected graph driver "vfs" overwritten by graph driver "overlay" from database - delete libpod local files to resolve
+
+ actual_become_cmd = ['ssh', f'{scenario.user_scenario.actual.name}@localhost']
+ prime_storage_command = actual_become_cmd + prepare_prime_podman_storage()
+
+ message = ''
+
+ if scenario.expose_cgroup_v1:
+ prepare_cgroup_systemd(scenario.user_scenario.actual.name, scenario.engine)
+
+ try:
+ if prime_storage_command:
+ retry_command(lambda: run_command(*prime_storage_command), retry_any_error=True)
+
+ if scenario.disable_selinux:
+ run_command('setenforce', 'permissive')
+
+ if scenario.enable_sha1:
+ run_command('update-crypto-policies', '--set', 'DEFAULT:SHA1')
+
+ for test_command in test_commands:
+ retry_command(lambda: run_command(*test_command))
+ except SubprocessError as ex:
+ message = str(ex)
+ display.error(f'{scenario} {message}')
+ finally:
+ if scenario.enable_sha1:
+ run_command('update-crypto-policies', '--set', 'DEFAULT')
+
+ if scenario.disable_selinux:
+ run_command('setenforce', 'enforcing')
+
+ if scenario.expose_cgroup_v1:
+ dirs = remove_cgroup_systemd()
+ else:
+ dirs = list_group_systemd()
+
+ cleanup_command = [scenario.engine, 'rmi', '-f', scenario.image]
+
+ try:
+ retry_command(lambda: run_command(*client_become_cmd + [f'{format_env(common_env)}{shlex.join(cleanup_command)}']), retry_any_error=True)
+ except SubprocessError as ex:
+ display.error(str(ex))
+
+ cleanup = cleanup_podman() if scenario.engine == 'podman' else tuple()
+
+ finish = time.monotonic()
+ duration = datetime.timedelta(seconds=int(finish - start))
+
+ display.section(f'Testing {scenario} Completed in {duration}')
+
+ return TestResult(
+ scenario=scenario,
+ message=message,
+ cleanup=cleanup,
+ duration=duration,
+ cgroup_dirs=tuple(str(path) for path in dirs),
+ )
+
+
+def prepare_prime_podman_storage() -> list[str]:
+ """Partially prime podman storage and return a command to complete the remainder."""
+ prime_storage_command = ['rm -rf ~/.local/share/containers; STORAGE_DRIVER=overlay podman pull quay.io/bedrock/alpine:3.16.2']
+
+ test_containers = pathlib.Path(f'~{UNPRIVILEGED_USER_NAME}/.local/share/containers').expanduser()
+
+ if test_containers.is_dir():
+ # First remove the directory as root, since the user may not have permissions on all the files.
+ # The directory will be removed again after login, before initializing the database.
+ rmtree(test_containers)
+
+ return prime_storage_command
+
+
+def cleanup_podman() -> tuple[str, ...]:
+ """Cleanup podman processes and files on disk."""
+ cleanup = []
+
+ for remaining in range(3, -1, -1):
+ processes = [(int(item[0]), item[1]) for item in
+ [item.split(maxsplit=1) for item in run_command('ps', '-A', '-o', 'pid,comm', capture=True).stdout.splitlines()]
+ if pathlib.Path(item[1].split()[0]).name in ('catatonit', 'podman', 'conmon')]
+
+ if not processes:
+ break
+
+ for pid, name in processes:
+ display.info(f'Killing "{name}" ({pid}) ...')
+
+ try:
+ os.kill(pid, signal.SIGTERM if remaining > 1 else signal.SIGKILL)
+ except ProcessLookupError:
+ pass
+
+ cleanup.append(name)
+
+ time.sleep(1)
+ else:
+ raise Exception('failed to kill all matching processes')
+
+ uid = pwd.getpwnam(UNPRIVILEGED_USER_NAME).pw_uid
+
+ container_tmp = pathlib.Path(f'/tmp/containers-user-{uid}')
+ podman_tmp = pathlib.Path(f'/tmp/podman-run-{uid}')
+
+ user_config = pathlib.Path(f'~{UNPRIVILEGED_USER_NAME}/.config').expanduser()
+ user_local = pathlib.Path(f'~{UNPRIVILEGED_USER_NAME}/.local').expanduser()
+
+ if container_tmp.is_dir():
+ rmtree(container_tmp)
+
+ if podman_tmp.is_dir():
+ rmtree(podman_tmp)
+
+ if user_config.is_dir():
+ rmtree(user_config)
+
+ if user_local.is_dir():
+ rmtree(user_local)
+
+ return tuple(sorted(set(cleanup)))
+
+
+def have_cgroup_systemd() -> bool:
+ """Return True if the container host has a systemd cgroup."""
+ return CGROUP_SYSTEMD.is_dir()
+
+
+def prepare_cgroup_systemd(username: str, engine: str) -> None:
+ """Prepare the systemd cgroup."""
+ CGROUP_SYSTEMD.mkdir()
+
+ run_command('mount', 'cgroup', '-t', 'cgroup', str(CGROUP_SYSTEMD), '-o', 'none,name=systemd,xattr', capture=True)
+
+ if engine == 'podman':
+ run_command('chown', '-R', f'{username}:{username}', str(CGROUP_SYSTEMD))
+
+ run_command('find', str(CGROUP_SYSTEMD), '-type', 'd', '-exec', 'ls', '-l', '{}', ';')
+
+
+def list_group_systemd() -> list[pathlib.Path]:
+ """List the systemd cgroup."""
+ dirs = set()
+
+ for dirpath, dirnames, filenames in os.walk(CGROUP_SYSTEMD, topdown=False):
+ for dirname in dirnames:
+ target_path = pathlib.Path(dirpath, dirname)
+ display.info(f'dir: {target_path}')
+ dirs.add(target_path)
+
+ return sorted(dirs)
+
+
+def remove_cgroup_systemd() -> list[pathlib.Path]:
+ """Remove the systemd cgroup."""
+ dirs = set()
+
+ for sleep_seconds in range(1, 10):
+ try:
+ for dirpath, dirnames, filenames in os.walk(CGROUP_SYSTEMD, topdown=False):
+ for dirname in dirnames:
+ target_path = pathlib.Path(dirpath, dirname)
+ display.info(f'rmdir: {target_path}')
+ dirs.add(target_path)
+ target_path.rmdir()
+ except OSError as ex:
+ if ex.errno != errno.EBUSY:
+ raise
+
+ error = str(ex)
+ else:
+ break
+
+ display.warning(f'{error} -- sleeping for {sleep_seconds} second(s) before trying again ...') # pylint: disable=used-before-assignment
+
+ time.sleep(sleep_seconds)
+
+ time.sleep(1) # allow time for cgroups to be fully removed before unmounting
+
+ run_command('umount', str(CGROUP_SYSTEMD))
+
+ CGROUP_SYSTEMD.rmdir()
+
+ time.sleep(1) # allow time for cgroup hierarchy to be removed after unmounting
+
+ cgroup = pathlib.Path('/proc/self/cgroup').read_text()
+
+ if 'systemd' in cgroup:
+ raise Exception('systemd hierarchy detected')
+
+ return sorted(dirs)
+
+
+def rmtree(path: pathlib.Path) -> None:
+ """Wrapper around shutil.rmtree with additional error handling."""
+ for retries in range(10, -1, -1):
+ try:
+ display.info(f'rmtree: {path} ({retries} attempts remaining) ... ')
+ shutil.rmtree(path)
+ except Exception:
+ if not path.exists():
+ display.info(f'rmtree: {path} (not found)')
+ return
+
+ if not path.is_dir():
+ display.info(f'rmtree: {path} (not a directory)')
+ return
+
+ if retries:
+ continue
+
+ raise
+ else:
+ display.info(f'rmtree: {path} (done)')
+ return
+
+
+def format_env(env: dict[str, str]) -> str:
+ """Format an env dict for injection into a shell command and return the resulting string."""
+ if env:
+ return ' '.join(f'{shlex.quote(key)}={shlex.quote(value)}' for key, value in env.items()) + ' '
+
+ return ''
+
+
+class DockerInfo:
+ """The results of `docker info` for the container runtime."""
+
+ def __init__(self, data: dict[str, t.Any]) -> None:
+ self.data = data
+
+ @property
+ def cgroup_version(self) -> int:
+ """The cgroup version of the container host."""
+ data = self.data
+ host = data.get('host')
+
+ if host:
+ version = int(host['cgroupVersion'].lstrip('v')) # podman
+ else:
+ version = int(data['CgroupVersion']) # docker
+
+ return version
+
+
+@functools.lru_cache
+def get_docker_info(engine: str) -> DockerInfo:
+ """Return info for the current container runtime. The results are cached."""
+ return DockerInfo(json.loads(run_command(engine, 'info', '--format', '{{ json . }}', capture=True).stdout))
+
+
+@dataclasses.dataclass(frozen=True)
+class User:
+ name: str
+ pwnam: pwd.struct_passwd
+
+ @classmethod
+ def get(cls, name: str) -> User:
+ return User(
+ name=name,
+ pwnam=pwd.getpwnam(name),
+ )
+
+
+@dataclasses.dataclass(frozen=True)
+class UserScenario:
+ ssh: User = None
+ remote: User = None
+
+ @property
+ def actual(self) -> User:
+ return self.remote or self.ssh or ROOT_USER
+
+
+@dataclasses.dataclass(frozen=True)
+class TestScenario:
+ user_scenario: UserScenario
+ engine: str
+ container_name: str
+ image: str
+ disable_selinux: bool
+ expose_cgroup_v1: bool
+ enable_sha1: bool
+ debug_systemd: bool
+ probe_cgroups: bool
+
+ @property
+ def tags(self) -> tuple[str, ...]:
+ tags = []
+
+ if self.user_scenario.ssh:
+ tags.append(f'ssh: {self.user_scenario.ssh.name}')
+
+ if self.user_scenario.remote:
+ tags.append(f'remote: {self.user_scenario.remote.name}')
+
+ if self.disable_selinux:
+ tags.append('selinux: permissive')
+
+ if self.expose_cgroup_v1:
+ tags.append('cgroup: v1')
+
+ if self.enable_sha1:
+ tags.append('sha1: enabled')
+
+ return tuple(tags)
+
+ @property
+ def tag_label(self) -> str:
+ return ' '.join(f'[{tag}]' for tag in self.tags)
+
+ def __str__(self):
+ return f'[{self.container_name}] ({self.engine}) {self.tag_label}'.strip()
+
+
+@dataclasses.dataclass(frozen=True)
+class TestResult:
+ scenario: TestScenario
+ message: str
+ cleanup: tuple[str, ...]
+ duration: datetime.timedelta
+ cgroup_dirs: tuple[str, ...]
+
+
+def parse_completion_entry(value: str) -> tuple[str, dict[str, str]]:
+ """Parse the given completion entry, returning the entry name and a dictionary of key/value settings."""
+ values = value.split()
+
+ name = values[0]
+ data = {kvp[0]: kvp[1] if len(kvp) > 1 else '' for kvp in [item.split('=', 1) for item in values[1:]]}
+
+ return name, data
+
+
+@dataclasses.dataclass(frozen=True)
+class SubprocessResult:
+ """Result from execution of a subprocess."""
+
+ command: list[str]
+ stdout: str
+ stderr: str
+ status: int
+
+
+class ApplicationError(Exception):
+ """An application error."""
+
+ def __init__(self, message: str) -> None:
+ self.message = message
+
+ super().__init__(message)
+
+
+class SubprocessError(ApplicationError):
+ """An error from executing a subprocess."""
+
+ def __init__(self, result: SubprocessResult) -> None:
+ self.result = result
+
+ message = f'Command `{shlex.join(result.command)}` exited with status: {result.status}'
+
+ stdout = (result.stdout or '').strip()
+ stderr = (result.stderr or '').strip()
+
+ if stdout:
+ message += f'\n>>> Standard Output\n{stdout}'
+
+ if stderr:
+ message += f'\n>>> Standard Error\n{stderr}'
+
+ super().__init__(message)
+
+
+class ProgramNotFoundError(ApplicationError):
+ """A required program was not found."""
+
+ def __init__(self, name: str) -> None:
+ self.name = name
+
+ super().__init__(f'Missing program: {name}')
+
+
+class Display:
+ """Display interface for sending output to the console."""
+
+ CLEAR = '\033[0m'
+ RED = '\033[31m'
+ GREEN = '\033[32m'
+ YELLOW = '\033[33m'
+ BLUE = '\033[34m'
+ PURPLE = '\033[35m'
+ CYAN = '\033[36m'
+
+ def __init__(self) -> None:
+ self.sensitive: set[str] = set()
+
+ def section(self, message: str) -> None:
+ """Print a section message to the console."""
+ self.show(f'==> {message}', color=self.BLUE)
+
+ def subsection(self, message: str) -> None:
+ """Print a subsection message to the console."""
+ self.show(f'--> {message}', color=self.CYAN)
+
+ def fatal(self, message: str) -> None:
+ """Print a fatal message to the console."""
+ self.show(f'FATAL: {message}', color=self.RED)
+
+ def error(self, message: str) -> None:
+ """Print an error message to the console."""
+ self.show(f'ERROR: {message}', color=self.RED)
+
+ def warning(self, message: str) -> None:
+ """Print a warning message to the console."""
+ self.show(f'WARNING: {message}', color=self.PURPLE)
+
+ def info(self, message: str) -> None:
+ """Print an info message to the console."""
+ self.show(f'INFO: {message}', color=self.YELLOW)
+
+ def show(self, message: str, color: str | None = None) -> None:
+ """Print a message to the console."""
+ for item in self.sensitive:
+ message = message.replace(item, '*' * len(item))
+
+ print(f'{color or self.CLEAR}{message}{self.CLEAR}', flush=True)
+
+
+def run_module(
+ module: str,
+ args: dict[str, t.Any],
+) -> SubprocessResult:
+ """Run the specified Ansible module and return the result."""
+ return run_command('ansible', '-m', module, '-v', '-a', json.dumps(args), 'localhost')
+
+
+def retry_command(func: t.Callable[[], SubprocessResult], attempts: int = 3, retry_any_error: bool = False) -> SubprocessResult:
+ """Run the given command function up to the specified number of attempts when the failure is due to an SSH error."""
+ for attempts_remaining in range(attempts - 1, -1, -1):
+ try:
+ return func()
+ except SubprocessError as ex:
+ if ex.result.command[0] == 'ssh' and ex.result.status == 255 and attempts_remaining:
+ # SSH connections on our Ubuntu 22.04 host sometimes fail for unknown reasons.
+ # This retry should allow the test suite to continue, maintaining CI stability.
+ # TODO: Figure out why local SSH connections sometimes fail during the test run.
+ display.warning('Command failed due to an SSH error. Waiting a few seconds before retrying.')
+ time.sleep(3)
+ continue
+
+ if retry_any_error:
+ display.warning('Command failed. Waiting a few seconds before retrying.')
+ time.sleep(3)
+ continue
+
+ raise
+
+
+def run_command(
+ *command: str,
+ data: str | None = None,
+ stdin: int | t.IO[bytes] | None = None,
+ env: dict[str, str] | None = None,
+ capture: bool = False,
+) -> SubprocessResult:
+ """Run the specified command and return the result."""
+ stdin = subprocess.PIPE if data else stdin or subprocess.DEVNULL
+ stdout = subprocess.PIPE if capture else None
+ stderr = subprocess.PIPE if capture else None
+
+ display.subsection(f'Run command: {shlex.join(command)}')
+
+ try:
+ with subprocess.Popen(args=command, stdin=stdin, stdout=stdout, stderr=stderr, env=env, text=True) as process:
+ process_stdout, process_stderr = process.communicate(data)
+ process_status = process.returncode
+ except FileNotFoundError:
+ raise ProgramNotFoundError(command[0]) from None
+
+ result = SubprocessResult(
+ command=list(command),
+ stdout=process_stdout,
+ stderr=process_stderr,
+ status=process_status,
+ )
+
+ if process.returncode != 0:
+ raise SubprocessError(result)
+
+ return result
+
+
+class Bootstrapper(metaclass=abc.ABCMeta):
+ """Bootstrapper for remote instances."""
+
+ @classmethod
+ def install_podman(cls) -> bool:
+ """Return True if podman will be installed."""
+ return False
+
+ @classmethod
+ def install_docker(cls) -> bool:
+ """Return True if docker will be installed."""
+ return False
+
+ @classmethod
+ def usable(cls) -> bool:
+ """Return True if the bootstrapper can be used, otherwise False."""
+ return False
+
+ @classmethod
+ def init(cls) -> t.Type[Bootstrapper]:
+ """Return a bootstrapper type appropriate for the current system."""
+ for bootstrapper in cls.__subclasses__():
+ if bootstrapper.usable():
+ return bootstrapper
+
+ display.warning('No supported bootstrapper found.')
+ return Bootstrapper
+
+ @classmethod
+ def run(cls) -> None:
+ """Run the bootstrapper."""
+ cls.configure_root_user()
+ cls.configure_unprivileged_user()
+ cls.configure_source_trees()
+ cls.configure_ssh_keys()
+ cls.configure_podman_remote()
+
+ @classmethod
+ def configure_root_user(cls) -> None:
+ """Configure the root user to run tests."""
+ root_password_status = run_command('passwd', '--status', 'root', capture=True)
+ root_password_set = root_password_status.stdout.split()[1]
+
+ if root_password_set not in ('P', 'PS'):
+ root_password = run_command('openssl', 'passwd', '-5', '-stdin', data=secrets.token_hex(8), capture=True).stdout.strip()
+
+ run_module(
+ 'user',
+ dict(
+ user='root',
+ password=root_password,
+ ),
+ )
+
+ @classmethod
+ def configure_unprivileged_user(cls) -> None:
+ """Configure the unprivileged user to run tests."""
+ unprivileged_password = run_command('openssl', 'passwd', '-5', '-stdin', data=secrets.token_hex(8), capture=True).stdout.strip()
+
+ run_module(
+ 'user',
+ dict(
+ user=UNPRIVILEGED_USER_NAME,
+ password=unprivileged_password,
+ groups=['docker'] if cls.install_docker() else [],
+ append=True,
+ ),
+ )
+
+ if os_release.id == 'alpine':
+ # Most distros handle this automatically, but not Alpine.
+ # See: https://www.redhat.com/sysadmin/rootless-podman
+ start = 165535
+ end = start + 65535
+ id_range = f'{start}-{end}'
+
+ run_command(
+ 'usermod',
+ '--add-subuids',
+ id_range,
+ '--add-subgids',
+ id_range,
+ UNPRIVILEGED_USER_NAME,
+ )
+
+ @classmethod
+ def configure_source_trees(cls) -> None:
+ """Configure the source trees needed to run tests for both root and the unprivileged user."""
+ current_ansible = pathlib.Path(os.environ['PYTHONPATH']).parent
+
+ root_ansible = pathlib.Path('~').expanduser() / 'ansible'
+ test_ansible = pathlib.Path(f'~{UNPRIVILEGED_USER_NAME}').expanduser() / 'ansible'
+
+ if current_ansible != root_ansible:
+ display.info(f'copying {current_ansible} -> {root_ansible} ...')
+ rmtree(root_ansible)
+ shutil.copytree(current_ansible, root_ansible)
+ run_command('chown', '-R', 'root:root', str(root_ansible))
+
+ display.info(f'copying {current_ansible} -> {test_ansible} ...')
+ rmtree(test_ansible)
+ shutil.copytree(current_ansible, test_ansible)
+ run_command('chown', '-R', f'{UNPRIVILEGED_USER_NAME}:{UNPRIVILEGED_USER_NAME}', str(test_ansible))
+
+ paths = [pathlib.Path(test_ansible)]
+
+ for root, dir_names, file_names in os.walk(test_ansible):
+ paths.extend(pathlib.Path(root, dir_name) for dir_name in dir_names)
+ paths.extend(pathlib.Path(root, file_name) for file_name in file_names)
+
+ user = pwd.getpwnam(UNPRIVILEGED_USER_NAME)
+ uid = user.pw_uid
+ gid = user.pw_gid
+
+ for path in paths:
+ os.chown(path, uid, gid)
+
+ @classmethod
+ def configure_ssh_keys(cls) -> None:
+ """Configure SSH keys needed to run tests."""
+ user = pwd.getpwnam(UNPRIVILEGED_USER_NAME)
+ uid = user.pw_uid
+ gid = user.pw_gid
+
+ current_rsa_pub = pathlib.Path('~/.ssh/id_rsa.pub').expanduser()
+
+ test_authorized_keys = pathlib.Path(f'~{UNPRIVILEGED_USER_NAME}/.ssh/authorized_keys').expanduser()
+
+ test_authorized_keys.parent.mkdir(mode=0o755, parents=True, exist_ok=True)
+ os.chown(test_authorized_keys.parent, uid, gid)
+
+ shutil.copyfile(current_rsa_pub, test_authorized_keys)
+ os.chown(test_authorized_keys, uid, gid)
+ test_authorized_keys.chmod(mode=0o644)
+
+ @classmethod
+ def configure_podman_remote(cls) -> None:
+ """Configure podman remote support."""
+ # TODO: figure out how to support remote podman without systemd (Alpine)
+ # TODO: figure out how to support remote podman on Ubuntu
+ if os_release.id in ('alpine', 'ubuntu'):
+ return
+
+ # Support podman remote on any host with systemd available.
+ retry_command(lambda: run_command('ssh', f'{UNPRIVILEGED_USER_NAME}@localhost', 'systemctl', '--user', 'enable', '--now', 'podman.socket'))
+ run_command('loginctl', 'enable-linger', UNPRIVILEGED_USER_NAME)
+
+
+class DnfBootstrapper(Bootstrapper):
+ """Bootstrapper for dnf based systems."""
+
+ @classmethod
+ def install_podman(cls) -> bool:
+ """Return True if podman will be installed."""
+ return True
+
+ @classmethod
+ def install_docker(cls) -> bool:
+ """Return True if docker will be installed."""
+ return os_release.id != 'rhel'
+
+ @classmethod
+ def usable(cls) -> bool:
+ """Return True if the bootstrapper can be used, otherwise False."""
+ return bool(shutil.which('dnf'))
+
+ @classmethod
+ def run(cls) -> None:
+ """Run the bootstrapper."""
+ # NOTE: Install crun to make it available to podman, otherwise installing moby-engine can cause podman to use runc instead.
+ packages = ['podman', 'crun']
+
+ if cls.install_docker():
+ packages.append('moby-engine')
+
+ if os_release.id == 'fedora' and os_release.version_id == '36':
+ # In Fedora 36 the current version of netavark, 1.2.0, causes TCP connect to hang between rootful containers.
+ # The previously tested version, 1.1.0, did not have this issue.
+ # Unfortunately, with the release of 1.2.0 the 1.1.0 package was removed from the repositories.
+ # Thankfully the 1.0.2 version is available and also works, so we'll use that here until a fixed version is available.
+ # See: https://github.com/containers/netavark/issues/491
+ packages.append('netavark-1.0.2')
+
+ if os_release.id == 'rhel':
+ # As of the release of RHEL 9.1, installing podman on RHEL 9.0 results in a non-fatal error at install time:
+ #
+ # libsemanage.semanage_pipe_data: Child process /usr/libexec/selinux/hll/pp failed with code: 255. (No such file or directory).
+ # container: libsepol.policydb_read: policydb module version 21 does not match my version range 4-20
+ # container: libsepol.sepol_module_package_read: invalid module in module package (at section 0)
+ # container: Failed to read policy package
+ # libsemanage.semanage_direct_commit: Failed to compile hll files into cil files.
+ # (No such file or directory).
+ # /usr/sbin/semodule: Failed!
+ #
+ # Unfortunately this is then fatal when running podman, resulting in no error message and a 127 return code.
+ # The solution is to update the policycoreutils package *before* installing podman.
+ #
+ # NOTE: This work-around can probably be removed once we're testing on RHEL 9.1, as the updated packages should already be installed.
+ # Unfortunately at this time there is no RHEL 9.1 AMI available (other than the Beta release).
+
+ run_command('dnf', 'update', '-y', 'policycoreutils')
+
+ run_command('dnf', 'install', '-y', *packages)
+
+ if cls.install_docker():
+ run_command('systemctl', 'start', 'docker')
+
+ if os_release.id == 'rhel' and os_release.version_id.startswith('8.'):
+ # RHEL 8 defaults to using runc instead of crun.
+ # Unfortunately runc seems to have issues with podman remote.
+ # Specifically, it tends to cause conmon to burn CPU until it reaches the specified exit delay.
+ # So we'll just change the system default to crun instead.
+ # Unfortunately we can't do this with the `--runtime` option since that doesn't work with podman remote.
+
+ conf = pathlib.Path('/usr/share/containers/containers.conf').read_text()
+
+ conf = re.sub('^runtime .*', 'runtime = "crun"', conf, flags=re.MULTILINE)
+
+ pathlib.Path('/etc/containers/containers.conf').write_text(conf)
+
+ super().run()
+
+
+class AptBootstrapper(Bootstrapper):
+ """Bootstrapper for apt based systems."""
+
+ @classmethod
+ def install_podman(cls) -> bool:
+ """Return True if podman will be installed."""
+ return not (os_release.id == 'ubuntu' and os_release.version_id == '20.04')
+
+ @classmethod
+ def install_docker(cls) -> bool:
+ """Return True if docker will be installed."""
+ return True
+
+ @classmethod
+ def usable(cls) -> bool:
+ """Return True if the bootstrapper can be used, otherwise False."""
+ return bool(shutil.which('apt-get'))
+
+ @classmethod
+ def run(cls) -> None:
+ """Run the bootstrapper."""
+ apt_env = os.environ.copy()
+ apt_env.update(
+ DEBIAN_FRONTEND='noninteractive',
+ )
+
+ packages = ['docker.io']
+
+ if cls.install_podman():
+ # NOTE: Install crun to make it available to podman, otherwise installing docker.io can cause podman to use runc instead.
+ # Using podman rootless requires the `newuidmap` and `slirp4netns` commands.
+ packages.extend(('podman', 'crun', 'uidmap', 'slirp4netns'))
+
+ run_command('apt-get', 'install', *packages, '-y', '--no-install-recommends', env=apt_env)
+
+ super().run()
+
+
+class ApkBootstrapper(Bootstrapper):
+ """Bootstrapper for apk based systems."""
+
+ @classmethod
+ def install_podman(cls) -> bool:
+ """Return True if podman will be installed."""
+ return True
+
+ @classmethod
+ def install_docker(cls) -> bool:
+ """Return True if docker will be installed."""
+ return True
+
+ @classmethod
+ def usable(cls) -> bool:
+ """Return True if the bootstrapper can be used, otherwise False."""
+ return bool(shutil.which('apk'))
+
+ @classmethod
+ def run(cls) -> None:
+ """Run the bootstrapper."""
+ # The `openssl` package is used to generate hashed passwords.
+ packages = ['docker', 'podman', 'openssl']
+
+ run_command('apk', 'add', *packages)
+ run_command('service', 'docker', 'start')
+ run_command('modprobe', 'tun')
+
+ super().run()
+
+
+@dataclasses.dataclass(frozen=True)
+class OsRelease:
+ """Operating system identification."""
+
+ id: str
+ version_id: str
+
+ @staticmethod
+ def init() -> OsRelease:
+ """Detect the current OS release and return the result."""
+ lines = run_command('sh', '-c', '. /etc/os-release && echo $ID && echo $VERSION_ID', capture=True).stdout.splitlines()
+
+ result = OsRelease(
+ id=lines[0],
+ version_id=lines[1],
+ )
+
+ display.show(f'Detected OS "{result.id}" version "{result.version_id}".')
+
+ return result
+
+
+display = Display()
+os_release = OsRelease.init()
+
+ROOT_USER = User.get('root')
+
+if __name__ == '__main__':
+ main()
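The completion entries consumed by get_test_scenarios() are whitespace-separated name/key=value records; a usage sketch for parse_completion_entry() above (the entry text below is illustrative, not a real line from docker.txt):

    name, data = parse_completion_entry('alpine3 image=quay.io/ansible/alpine3-test-container:1.0 cgroup=none')
    assert name == 'alpine3'
    assert data == {'image': 'quay.io/ansible/alpine3-test-container:1.0', 'cgroup': 'none'}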
diff --git a/test/integration/targets/ansible-test-container/runme.sh b/test/integration/targets/ansible-test-container/runme.sh
new file mode 100755
index 00000000..56fd6690
--- /dev/null
+++ b/test/integration/targets/ansible-test-container/runme.sh
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+
+set -eu
+
+./runme.py
diff --git a/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/col/plugins/modules/invalid_yaml_syntax.py b/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/col/plugins/modules/invalid_yaml_syntax.py
new file mode 100644
index 00000000..5dd753f7
--- /dev/null
+++ b/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/col/plugins/modules/invalid_yaml_syntax.py
@@ -0,0 +1,27 @@
+#!/usr/bin/python
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+- key: "value"wrong
+'''
+
+EXAMPLES = '''
+- key: "value"wrong
+'''
+
+RETURN = '''
+- key: "value"wrong
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ AnsibleModule(argument_spec=dict())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/failure/README.rst b/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/failure/README.rst
new file mode 100644
index 00000000..bf1003fa
--- /dev/null
+++ b/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/failure/README.rst
@@ -0,0 +1,3 @@
+README
+------
+This is a simple collection used to test failures with ``ansible-test sanity --test validate-modules``.
diff --git a/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/failure/galaxy.yml b/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/failure/galaxy.yml
new file mode 100644
index 00000000..3b116713
--- /dev/null
+++ b/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/failure/galaxy.yml
@@ -0,0 +1,6 @@
+namespace: ns
+name: failure
+version: 1.0.0
+readme: README.rst
+authors:
+ - Ansible
diff --git a/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/failure/meta/main.yml b/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/failure/meta/main.yml
new file mode 100644
index 00000000..1602a255
--- /dev/null
+++ b/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/failure/meta/main.yml
@@ -0,0 +1 @@
+requires_ansible: '>=2.9'
diff --git a/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/failure/plugins/modules/failure_ps.ps1 b/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/failure/plugins/modules/failure_ps.ps1
new file mode 100644
index 00000000..6ec04393
--- /dev/null
+++ b/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/failure/plugins/modules/failure_ps.ps1
@@ -0,0 +1,16 @@
+#!powershell
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#AnsibleRequires -CSharpUtil Ansible.Basic
+
+throw "test inner error message"
+
+$module = [Ansible.Basic.AnsibleModule]::Create($args, @{
+ options = @{
+ test = @{ type = 'str'; choices = @('foo', 'bar'); default = 'foo' }
+ }
+ })
+
+$module.Result.test = 'abc'
+
+$module.ExitJson()
diff --git a/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/failure/plugins/modules/failure_ps.yml b/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/failure/plugins/modules/failure_ps.yml
new file mode 100644
index 00000000..c657ec9b
--- /dev/null
+++ b/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/failure/plugins/modules/failure_ps.yml
@@ -0,0 +1,31 @@
+# Copyright (c) 2022 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION:
+ module: failure_ps
+ short_description: Short description for failure_ps module
+ description:
+ - Description for failure_ps module
+ options:
+ test:
+ description:
+ - Description for test module option
+ type: str
+ choices:
+ - foo
+ - bar
+ default: foo
+ author:
+ - Ansible Core Team
+
+EXAMPLES: |
+ - name: example for failure_ps
+ ns.failure.failure_ps:
+ test: bar
+
+RETURN:
+ test:
+ description: The test return value
+ returned: always
+ type: str
+ sample: abc
diff --git a/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/ps_only/plugins/module_utils/share_module.psm1 b/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/ps_only/plugins/module_utils/share_module.psm1
new file mode 100644
index 00000000..1e8ff905
--- /dev/null
+++ b/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/ps_only/plugins/module_utils/share_module.psm1
@@ -0,0 +1,19 @@
+#AnsibleRequires -CSharpUtil Ansible.Basic
+
+Function Invoke-AnsibleModule {
+ <#
+ .SYNOPSIS
+ validate
+ #>
+ [CmdletBinding()]
+ param ()
+
+ $module = [Ansible.Basic.AnsibleModule]::Create(@(), @{
+ options = @{
+ test = @{ type = 'str' }
+ }
+ })
+ $module.ExitJson()
+}
+
+Export-ModuleMember -Function Invoke-AnsibleModule
diff --git a/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/ps_only/plugins/modules/in_function.ps1 b/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/ps_only/plugins/modules/in_function.ps1
new file mode 100644
index 00000000..8f74edcc
--- /dev/null
+++ b/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/ps_only/plugins/modules/in_function.ps1
@@ -0,0 +1,7 @@
+#!powershell
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#AnsibleRequires -CSharpUtil Ansible.Basic
+#AnsibleRequires -PowerShell ..module_utils.share_module
+
+Invoke-AnsibleModule
diff --git a/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/ps_only/plugins/modules/in_function.yml b/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/ps_only/plugins/modules/in_function.yml
new file mode 100644
index 00000000..87d3ec77
--- /dev/null
+++ b/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/ps_only/plugins/modules/in_function.yml
@@ -0,0 +1,25 @@
+# Copyright (c) 2022 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION:
+ module: in_function
+ short_description: Short description for in_function module
+ description:
+ - Description for in_function module
+ options:
+ test:
+ description: Description for test
+ type: str
+ author:
+ - Ansible Core Team
+
+EXAMPLES: |
+ - name: example for in_function
+ ns.ps_only.in_function:
+
+RETURN:
+ test:
+ description: The test return value
+ returned: always
+ type: str
+ sample: abc
diff --git a/test/integration/targets/ansible-test-sanity-validate-modules/expected.txt b/test/integration/targets/ansible-test-sanity-validate-modules/expected.txt
new file mode 100644
index 00000000..95f12f39
--- /dev/null
+++ b/test/integration/targets/ansible-test-sanity-validate-modules/expected.txt
@@ -0,0 +1,5 @@
+plugins/modules/invalid_yaml_syntax.py:0:0: deprecation-mismatch: "meta/runtime.yml" and DOCUMENTATION.deprecation do not agree.
+plugins/modules/invalid_yaml_syntax.py:0:0: missing-documentation: No DOCUMENTATION provided
+plugins/modules/invalid_yaml_syntax.py:8:15: documentation-syntax-error: DOCUMENTATION is not valid YAML
+plugins/modules/invalid_yaml_syntax.py:12:15: invalid-examples: EXAMPLES is not valid YAML
+plugins/modules/invalid_yaml_syntax.py:16:15: return-syntax-error: RETURN is not valid YAML
diff --git a/test/integration/targets/ansible-test-sanity-validate-modules/runme.sh b/test/integration/targets/ansible-test-sanity-validate-modules/runme.sh
index 1b051b3a..e0299969 100755
--- a/test/integration/targets/ansible-test-sanity-validate-modules/runme.sh
+++ b/test/integration/targets/ansible-test-sanity-validate-modules/runme.sh
@@ -4,7 +4,9 @@ source ../collection/setup.sh
set -eux
-ansible-test sanity --test validate-modules --color --truncate 0 "${@}"
+ansible-test sanity --test validate-modules --color --truncate 0 --failure-ok --lint "${@}" 1> actual-stdout.txt 2> actual-stderr.txt
+diff -u "${TEST_DIR}/expected.txt" actual-stdout.txt
+grep -f "${TEST_DIR}/expected.txt" actual-stderr.txt
cd ../ps_only
@@ -15,3 +17,18 @@ fi
# Use a PowerShell-only collection to verify that validate-modules does not load the collection loader multiple times.
ansible-test sanity --test validate-modules --color --truncate 0 "${@}"
+
+cd ../failure
+
+if ansible-test sanity --test validate-modules --color --truncate 0 "${@}" 1> ansible-stdout.txt 2> ansible-stderr.txt; then
+ echo "ansible-test sanity for failure should cause failure"
+ exit 1
+fi
+
+cat ansible-stdout.txt
+grep -q "ERROR: plugins/modules/failure_ps.ps1:0:0: import-error: Exception attempting to import module for argument_spec introspection" < ansible-stdout.txt
+grep -q "test inner error message" < ansible-stdout.txt
+
+cat ansible-stderr.txt
+grep -q "FATAL: The 1 sanity test(s) listed below (out of 1) failed" < ansible-stderr.txt
+grep -q "validate-modules" < ansible-stderr.txt
diff --git a/test/integration/targets/argspec/library/argspec.py b/test/integration/targets/argspec/library/argspec.py
index 1a1d288d..b6d6d110 100644
--- a/test/integration/targets/argspec/library/argspec.py
+++ b/test/integration/targets/argspec/library/argspec.py
@@ -34,7 +34,7 @@ def main():
'elements': 'dict',
'options': {
'thing': {},
- 'other': {},
+ 'other': {'aliases': ['other_alias']},
},
},
'required_by': {
@@ -136,9 +136,111 @@ def main():
'bar': {
'type': 'str',
'default': 'baz',
+ 'aliases': ['bar_alias1', 'bar_alias2'],
+ },
+ },
+ },
+ 'deprecation_aliases': {
+ 'type': 'str',
+ 'aliases': [
+ 'deprecation_aliases_version',
+ 'deprecation_aliases_date',
+ ],
+ 'deprecated_aliases': [
+ {
+ 'name': 'deprecation_aliases_version',
+ 'version': '2.0.0',
+ 'collection_name': 'foo.bar',
+ },
+ {
+ 'name': 'deprecation_aliases_date',
+ 'date': '2023-01-01',
+ 'collection_name': 'foo.bar',
+ },
+ ],
+ },
+ 'deprecation_param_version': {
+ 'type': 'str',
+ 'removed_in_version': '2.0.0',
+ 'removed_from_collection': 'foo.bar',
+ },
+ 'deprecation_param_date': {
+ 'type': 'str',
+ 'removed_at_date': '2023-01-01',
+ 'removed_from_collection': 'foo.bar',
+ },
+ 'subdeprecation': {
+ 'aliases': [
+ 'subdeprecation_alias',
+ ],
+ 'type': 'dict',
+ 'options': {
+ 'deprecation_aliases': {
+ 'type': 'str',
+ 'aliases': [
+ 'deprecation_aliases_version',
+ 'deprecation_aliases_date',
+ ],
+ 'deprecated_aliases': [
+ {
+ 'name': 'deprecation_aliases_version',
+ 'version': '2.0.0',
+ 'collection_name': 'foo.bar',
+ },
+ {
+ 'name': 'deprecation_aliases_date',
+ 'date': '2023-01-01',
+ 'collection_name': 'foo.bar',
+ },
+ ],
+ },
+ 'deprecation_param_version': {
+ 'type': 'str',
+ 'removed_in_version': '2.0.0',
+ 'removed_from_collection': 'foo.bar',
+ },
+ 'deprecation_param_date': {
+ 'type': 'str',
+ 'removed_at_date': '2023-01-01',
+ 'removed_from_collection': 'foo.bar',
},
},
},
+ 'subdeprecation_list': {
+ 'type': 'list',
+ 'elements': 'dict',
+ 'options': {
+ 'deprecation_aliases': {
+ 'type': 'str',
+ 'aliases': [
+ 'deprecation_aliases_version',
+ 'deprecation_aliases_date',
+ ],
+ 'deprecated_aliases': [
+ {
+ 'name': 'deprecation_aliases_version',
+ 'version': '2.0.0',
+ 'collection_name': 'foo.bar',
+ },
+ {
+ 'name': 'deprecation_aliases_date',
+ 'date': '2023-01-01',
+ 'collection_name': 'foo.bar',
+ },
+ ],
+ },
+ 'deprecation_param_version': {
+ 'type': 'str',
+ 'removed_in_version': '2.0.0',
+ 'removed_from_collection': 'foo.bar',
+ },
+ 'deprecation_param_date': {
+ 'type': 'str',
+ 'removed_at_date': '2023-01-01',
+ 'removed_from_collection': 'foo.bar',
+ },
+ },
+ }
},
required_if=(
('state', 'present', ('path', 'content'), True),
diff --git a/test/integration/targets/argspec/tasks/main.yml b/test/integration/targets/argspec/tasks/main.yml
index 283c922d..6e8ec054 100644
--- a/test/integration/targets/argspec/tasks/main.yml
+++ b/test/integration/targets/argspec/tasks/main.yml
@@ -366,6 +366,130 @@
foo: bar
register: argspec_apply_defaults_one
+- argspec:
+ required: value
+ required_one_of_one: value
+ deprecation_aliases_version: value
+ register: deprecation_alias_version
+
+- argspec:
+ required: value
+ required_one_of_one: value
+ deprecation_aliases_date: value
+ register: deprecation_alias_date
+
+- argspec:
+ required: value
+ required_one_of_one: value
+ deprecation_param_version: value
+ register: deprecation_param_version
+
+- argspec:
+ required: value
+ required_one_of_one: value
+ deprecation_param_date: value
+ register: deprecation_param_date
+
+- argspec:
+ required: value
+ required_one_of_one: value
+ subdeprecation:
+ deprecation_aliases_version: value
+ register: sub_deprecation_alias_version
+
+- argspec:
+ required: value
+ required_one_of_one: value
+ subdeprecation:
+ deprecation_aliases_date: value
+ register: sub_deprecation_alias_date
+
+- argspec:
+ required: value
+ required_one_of_one: value
+ subdeprecation:
+ deprecation_param_version: value
+ register: sub_deprecation_param_version
+
+- argspec:
+ required: value
+ required_one_of_one: value
+ subdeprecation:
+ deprecation_param_date: value
+ register: sub_deprecation_param_date
+
+- argspec:
+ required: value
+ required_one_of_one: value
+ subdeprecation_alias:
+ deprecation_aliases_version: value
+ register: subalias_deprecation_alias_version
+
+- argspec:
+ required: value
+ required_one_of_one: value
+ subdeprecation_alias:
+ deprecation_aliases_date: value
+ register: subalias_deprecation_alias_date
+
+- argspec:
+ required: value
+ required_one_of_one: value
+ subdeprecation_alias:
+ deprecation_param_version: value
+ register: subalias_deprecation_param_version
+
+- argspec:
+ required: value
+ required_one_of_one: value
+ subdeprecation_alias:
+ deprecation_param_date: value
+ register: subalias_deprecation_param_date
+
+- argspec:
+ required: value
+ required_one_of_one: value
+ subdeprecation_list:
+ - deprecation_aliases_version: value
+ register: sublist_deprecation_alias_version
+
+- argspec:
+ required: value
+ required_one_of_one: value
+ subdeprecation_list:
+ - deprecation_aliases_date: value
+ register: sublist_deprecation_alias_date
+
+- argspec:
+ required: value
+ required_one_of_one: value
+ subdeprecation_list:
+ - deprecation_param_version: value
+ register: sublist_deprecation_param_version
+
+- argspec:
+ required: value
+ required_one_of_one: value
+ subdeprecation_list:
+ - deprecation_param_date: value
+ register: sublist_deprecation_param_date
+
+- argspec:
+ required: value
+ required_one_of_one: value
+ apply_defaults:
+ bar_alias1: foo
+ bar_alias2: baz
+ register: alias_warning_dict
+
+- argspec:
+ required: value
+ required_one_of_one: value
+ required_one_of:
+ - other: foo
+ other_alias: bar
+ register: alias_warning_listdict
+
- assert:
that:
- argspec_required_fail is failed
@@ -446,3 +570,90 @@
- "argspec_apply_defaults_none.apply_defaults == {'foo': none, 'bar': 'baz'}"
- "argspec_apply_defaults_empty.apply_defaults == {'foo': none, 'bar': 'baz'}"
- "argspec_apply_defaults_one.apply_defaults == {'foo': 'bar', 'bar': 'baz'}"
+
+ - deprecation_alias_version.deprecations | length == 1
+ - deprecation_alias_version.deprecations[0].msg == "Alias 'deprecation_aliases_version' is deprecated. See the module docs for more information"
+ - deprecation_alias_version.deprecations[0].collection_name == 'foo.bar'
+ - deprecation_alias_version.deprecations[0].version == '2.0.0'
+ - "'date' not in deprecation_alias_version.deprecations[0]"
+ - deprecation_alias_date.deprecations | length == 1
+ - deprecation_alias_date.deprecations[0].msg == "Alias 'deprecation_aliases_date' is deprecated. See the module docs for more information"
+ - deprecation_alias_date.deprecations[0].collection_name == 'foo.bar'
+ - deprecation_alias_date.deprecations[0].date == '2023-01-01'
+ - "'version' not in deprecation_alias_date.deprecations[0]"
+ - deprecation_param_version.deprecations | length == 1
+ - deprecation_param_version.deprecations[0].msg == "Param 'deprecation_param_version' is deprecated. See the module docs for more information"
+ - deprecation_param_version.deprecations[0].collection_name == 'foo.bar'
+ - deprecation_param_version.deprecations[0].version == '2.0.0'
+ - "'date' not in deprecation_param_version.deprecations[0]"
+ - deprecation_param_date.deprecations | length == 1
+ - deprecation_param_date.deprecations[0].msg == "Param 'deprecation_param_date' is deprecated. See the module docs for more information"
+ - deprecation_param_date.deprecations[0].collection_name == 'foo.bar'
+ - deprecation_param_date.deprecations[0].date == '2023-01-01'
+ - "'version' not in deprecation_param_date.deprecations[0]"
+
+ - sub_deprecation_alias_version.deprecations | length == 1
+ - sub_deprecation_alias_version.deprecations[0].msg == "Alias 'subdeprecation.deprecation_aliases_version' is deprecated. See the module docs for more information"
+ - sub_deprecation_alias_version.deprecations[0].collection_name == 'foo.bar'
+ - sub_deprecation_alias_version.deprecations[0].version == '2.0.0'
+ - "'date' not in sub_deprecation_alias_version.deprecations[0]"
+ - sub_deprecation_alias_date.deprecations | length == 1
+ - sub_deprecation_alias_date.deprecations[0].msg == "Alias 'subdeprecation.deprecation_aliases_date' is deprecated. See the module docs for more information"
+ - sub_deprecation_alias_date.deprecations[0].collection_name == 'foo.bar'
+ - sub_deprecation_alias_date.deprecations[0].date == '2023-01-01'
+ - "'version' not in sub_deprecation_alias_date.deprecations[0]"
+ - sub_deprecation_param_version.deprecations | length == 1
+ - sub_deprecation_param_version.deprecations[0].msg == "Param 'subdeprecation[\"deprecation_param_version\"]' is deprecated. See the module docs for more information"
+ - sub_deprecation_param_version.deprecations[0].collection_name == 'foo.bar'
+ - sub_deprecation_param_version.deprecations[0].version == '2.0.0'
+ - "'date' not in sub_deprecation_param_version.deprecations[0]"
+ - sub_deprecation_param_date.deprecations | length == 1
+ - sub_deprecation_param_date.deprecations[0].msg == "Param 'subdeprecation[\"deprecation_param_date\"]' is deprecated. See the module docs for more information"
+ - sub_deprecation_param_date.deprecations[0].collection_name == 'foo.bar'
+ - sub_deprecation_param_date.deprecations[0].date == '2023-01-01'
+ - "'version' not in sub_deprecation_param_date.deprecations[0]"
+
+ - subalias_deprecation_alias_version.deprecations | length == 1
+ - subalias_deprecation_alias_version.deprecations[0].msg == "Alias 'subdeprecation.deprecation_aliases_version' is deprecated. See the module docs for more information"
+ - subalias_deprecation_alias_version.deprecations[0].collection_name == 'foo.bar'
+ - subalias_deprecation_alias_version.deprecations[0].version == '2.0.0'
+ - "'date' not in subalias_deprecation_alias_version.deprecations[0]"
+ - subalias_deprecation_alias_date.deprecations | length == 1
+ - subalias_deprecation_alias_date.deprecations[0].msg == "Alias 'subdeprecation.deprecation_aliases_date' is deprecated. See the module docs for more information"
+ - subalias_deprecation_alias_date.deprecations[0].collection_name == 'foo.bar'
+ - subalias_deprecation_alias_date.deprecations[0].date == '2023-01-01'
+ - "'version' not in subalias_deprecation_alias_date.deprecations[0]"
+ - subalias_deprecation_param_version.deprecations | length == 1
+ - subalias_deprecation_param_version.deprecations[0].msg == "Param 'subdeprecation[\"deprecation_param_version\"]' is deprecated. See the module docs for more information"
+ - subalias_deprecation_param_version.deprecations[0].collection_name == 'foo.bar'
+ - subalias_deprecation_param_version.deprecations[0].version == '2.0.0'
+ - "'date' not in subalias_deprecation_param_version.deprecations[0]"
+ - subalias_deprecation_param_date.deprecations | length == 1
+ - subalias_deprecation_param_date.deprecations[0].msg == "Param 'subdeprecation[\"deprecation_param_date\"]' is deprecated. See the module docs for more information"
+ - subalias_deprecation_param_date.deprecations[0].collection_name == 'foo.bar'
+ - subalias_deprecation_param_date.deprecations[0].date == '2023-01-01'
+ - "'version' not in subalias_deprecation_param_date.deprecations[0]"
+
+ - sublist_deprecation_alias_version.deprecations | length == 1
+ - sublist_deprecation_alias_version.deprecations[0].msg == "Alias 'subdeprecation_list[0].deprecation_aliases_version' is deprecated. See the module docs for more information"
+ - sublist_deprecation_alias_version.deprecations[0].collection_name == 'foo.bar'
+ - sublist_deprecation_alias_version.deprecations[0].version == '2.0.0'
+ - "'date' not in sublist_deprecation_alias_version.deprecations[0]"
+ - sublist_deprecation_alias_date.deprecations | length == 1
+ - sublist_deprecation_alias_date.deprecations[0].msg == "Alias 'subdeprecation_list[0].deprecation_aliases_date' is deprecated. See the module docs for more information"
+ - sublist_deprecation_alias_date.deprecations[0].collection_name == 'foo.bar'
+ - sublist_deprecation_alias_date.deprecations[0].date == '2023-01-01'
+ - "'version' not in sublist_deprecation_alias_date.deprecations[0]"
+ - sublist_deprecation_param_version.deprecations | length == 1
+ - sublist_deprecation_param_version.deprecations[0].msg == "Param 'subdeprecation_list[\"deprecation_param_version\"]' is deprecated. See the module docs for more information"
+ - sublist_deprecation_param_version.deprecations[0].collection_name == 'foo.bar'
+ - sublist_deprecation_param_version.deprecations[0].version == '2.0.0'
+ - "'date' not in sublist_deprecation_param_version.deprecations[0]"
+ - sublist_deprecation_param_date.deprecations | length == 1
+ - sublist_deprecation_param_date.deprecations[0].msg == "Param 'subdeprecation_list[\"deprecation_param_date\"]' is deprecated. See the module docs for more information"
+ - sublist_deprecation_param_date.deprecations[0].collection_name == 'foo.bar'
+ - sublist_deprecation_param_date.deprecations[0].date == '2023-01-01'
+ - "'version' not in sublist_deprecation_param_date.deprecations[0]"
+
+ - "'Both option apply_defaults.bar and its alias apply_defaults.bar_alias2 are set.' in alias_warning_dict.warnings"
+ - "'Both option required_one_of[0].other and its alias required_one_of[0].other_alias are set.' in alias_warning_listdict.warnings"
diff --git a/test/integration/targets/blocks/79711.yml b/test/integration/targets/blocks/79711.yml
new file mode 100644
index 00000000..ca9bfbb4
--- /dev/null
+++ b/test/integration/targets/blocks/79711.yml
@@ -0,0 +1,17 @@
+- hosts: localhost
+ gather_facts: false
+ tasks:
+ - block:
+ - block:
+ - debug:
+ - name: EXPECTED FAILURE
+ fail:
+ rescue:
+ - debug:
+ - debug:
+ - name: EXPECTED FAILURE
+ fail:
+ always:
+ - debug:
+ always:
+ - debug:
diff --git a/test/integration/targets/blocks/runme.sh b/test/integration/targets/blocks/runme.sh
index 06e3ddee..820107bb 100755
--- a/test/integration/targets/blocks/runme.sh
+++ b/test/integration/targets/blocks/runme.sh
@@ -127,3 +127,12 @@ rm -f 78612.out
ansible-playbook -vv 43191.yml
ansible-playbook -vv 43191-2.yml
+
+# https://github.com/ansible/ansible/issues/79711
+set +e
+ANSIBLE_FORCE_HANDLERS=0 ansible-playbook -vv 79711.yml | tee 79711.out
+set -e
+[ "$(grep -c 'ok=5' 79711.out)" -eq 1 ]
+[ "$(grep -c 'failed=1' 79711.out)" -eq 1 ]
+[ "$(grep -c 'rescued=1' 79711.out)" -eq 1 ]
+rm -f 79711.out
diff --git a/test/integration/targets/file/tasks/main.yml b/test/integration/targets/file/tasks/main.yml
index 3aed4917..17b0fae6 100644
--- a/test/integration/targets/file/tasks/main.yml
+++ b/test/integration/targets/file/tasks/main.yml
@@ -711,6 +711,82 @@
- group_exists.warnings is not defined
- group_gid_exists.warnings is not defined
+# ensures touching a file returns changed when needed
+# issue: https://github.com/ansible/ansible/issues/79360
+- name: touch a file returns changed in check mode if file does not exist
+ file:
+ path: '/tmp/touch_check_mode_test'
+ state: touch
+ check_mode: yes
+ register: touch_result_in_check_mode_not_existing
+
+- name: touch the file
+ file:
+ path: '/tmp/touch_check_mode_test'
+ mode: "0660"
+ state: touch
+
+- name: touch an existing file returns changed in check mode
+ file:
+ path: '/tmp/touch_check_mode_test'
+ state: touch
+ check_mode: yes
+ register: touch_result_in_check_mode_change_all_attr
+
+- name: touch an existing file returns changed in check mode when preserving access time
+ file:
+ path: '/tmp/touch_check_mode_test'
+ state: touch
+ access_time: "preserve"
+ check_mode: yes
+ register: touch_result_in_check_mode_preserve_access_time
+
+- name: touch an existing file returns changed in check mode when only mode changes
+ file:
+ path: '/tmp/touch_check_mode_test'
+ state: touch
+ access_time: "preserve"
+ modification_time: "preserve"
+ mode: "0640"
+ check_mode: yes
+ register: touch_result_in_check_mode_change_only_mode
+
+- name: touch an existing file returns ok if all attributes are preserved
+ file:
+ path: '/tmp/touch_check_mode_test'
+ state: touch
+ access_time: "preserve"
+ modification_time: "preserve"
+ check_mode: yes
+ register: touch_result_in_check_mode_all_attrs_preserved
+
+- name: touch an existing file fails in check mode when user does not exist
+ file:
+ path: '/tmp/touch_check_mode_test'
+ state: touch
+ owner: not-existing-user
+ check_mode: yes
+ ignore_errors: true
+ register: touch_result_in_check_mode_fails_not_existing_user
+
+- name: touch an existing file fails in check mode when group does not exist
+ file:
+ path: '/tmp/touch_check_mode_test'
+ state: touch
+ group: not-existing-group
+ check_mode: yes
+ ignore_errors: true
+ register: touch_result_in_check_mode_fails_not_existing_group
+
+- assert:
+ that:
+ - touch_result_in_check_mode_not_existing.changed
+ - touch_result_in_check_mode_preserve_access_time.changed
+ - touch_result_in_check_mode_change_only_mode.changed
+ - not touch_result_in_check_mode_all_attrs_preserved.changed
+ - touch_result_in_check_mode_fails_not_existing_user.warnings[0] is search('failed to look up user')
+ - touch_result_in_check_mode_fails_not_existing_group.warnings[0] is search('failed to look up group')
+
# https://github.com/ansible/ansible/issues/50943
# Need to use /tmp as nobody can't access remote_tmp_dir_test at all
- name: create file as root with all write permissions
diff --git a/test/integration/targets/get_url/tasks/ciphers.yml b/test/integration/targets/get_url/tasks/ciphers.yml
index b8ebd981..c7d9979d 100644
--- a/test/integration/targets/get_url/tasks/ciphers.yml
+++ b/test/integration/targets/get_url/tasks/ciphers.yml
@@ -6,7 +6,7 @@
register: good_ciphers
- name: test bad cipher
- uri:
+ get_url:
url: https://{{ httpbin_host }}/get
ciphers: ECDHE-ECDSA-AES128-SHA
dest: '{{ remote_tmp_dir }}/bad_cipher_get.json'
diff --git a/test/integration/targets/inventory_script/inventory.json b/test/integration/targets/inventory_script/inventory.json
index 5046a9a8..69ba5476 100644
--- a/test/integration/targets/inventory_script/inventory.json
+++ b/test/integration/targets/inventory_script/inventory.json
@@ -1029,9 +1029,9 @@
},
"all": {
"children": [
+ "ungrouped",
"None",
- "guests",
- "ungrouped"
+ "guests"
]
},
"guests": {
diff --git a/test/integration/targets/reboot/aliases b/test/integration/targets/reboot/aliases
index e9bebbf3..7f995fd6 100644
--- a/test/integration/targets/reboot/aliases
+++ b/test/integration/targets/reboot/aliases
@@ -1,2 +1,5 @@
-# No current way to split controller and test node
-unsupported
+context/target
+destructive
+needs/root
+shippable/posix/group2
+skip/docker
diff --git a/test/integration/targets/reboot/tasks/main.yml b/test/integration/targets/reboot/tasks/main.yml
index 7687cb73..4884f104 100644
--- a/test/integration/targets/reboot/tasks/main.yml
+++ b/test/integration/targets/reboot/tasks/main.yml
@@ -1,39 +1,41 @@
-- name: Test reboot
- when: ansible_facts.virtualization_type | default('') not in ['docker', 'container', 'containerd']
- block:
- # This block can be removed once we have a mechanism in ansible-test to separate
- # the control node from the managed node.
- - block:
- - name: Write temp file for sanity checking this is not the controller
- copy:
- content: 'I am the control node'
- dest: /tmp/Anything-Nutlike-Nuzzle-Plow-Overdue
- delegate_to: localhost
- connection: local
- when: inventory_hostname == ansible_play_hosts[0]
+- name: Check split state
+ stat:
+ path: "{{ output_dir }}"
+ register: split
+ ignore_errors: yes
+
+- name: >-
+ Memorize whether we're in a containerized environment
+ and/or a split controller mode
+ set_fact:
+ in_container_env: >-
+ {{
+ ansible_facts.virtualization_type | default('')
+ in ['docker', 'container', 'containerd']
+ }}
+ in_split_controller_mode: >-
+ {{ split is not success or not split.stat.exists }}
- - name: See if the temp file exists on the managed node
- stat:
- path: /tmp/Anything-Nutlike-Nuzzle-Plow-Overdue
- register: controller_temp_file
+- name: Explain why testing against a container is not an option
+ debug:
+ msg: >-
+ This test is attempting to reboot the whole host operating system.
+ The current target is a containerized environment. Containers
+ cannot be rebooted like VMs. This is why the test is being skipped.

+ when: in_container_env
- - name: EXPECT FAILURE | Check if the managed node is the control node
- assert:
- msg: >
- This test must be run manually by modifying the inventory file to point
- "{{ inventory_hostname }}" at a remote host rather than "{{ ansible_host }}".
- Skipping reboot test.
- that:
- - not controller_temp_file.stat.exists
- always:
- - name: Cleanup temp file
- file:
- path: /tmp/Anything-Nutlike-Nuzzle-Plow-Overdue
- state: absent
- delegate_to: localhost
- connection: local
- when: inventory_hostname == ansible_play_hosts[0]
+- name: Explain why testing against the same host is not an option
+ debug:
+ msg: >-
+ This test is attempting to reboot the whole host operating system.
+ This means it would interrupt itself trying to reboot its own
+ environment. It needs to target a separate VM or machine to be
+ able to function, so it's being skipped in the current invocation.
+ when: not in_split_controller_mode
+- name: Test reboot
+ when: not in_container_env and in_split_controller_mode
+ block:
- import_tasks: test_standard_scenarios.yml
- import_tasks: test_reboot_command.yml
- import_tasks: test_invalid_parameter.yml
diff --git a/test/integration/targets/roles_arg_spec/test_complex_role_fails.yml b/test/integration/targets/roles_arg_spec/test_complex_role_fails.yml
index 8764d382..81abdaa8 100644
--- a/test/integration/targets/roles_arg_spec/test_complex_role_fails.yml
+++ b/test/integration/targets/roles_arg_spec/test_complex_role_fails.yml
@@ -168,3 +168,30 @@
- ansible_failed_result.validate_args_context.name == "test1"
- ansible_failed_result.validate_args_context.type == "role"
- "ansible_failed_result.validate_args_context.path is search('roles_arg_spec/roles/test1')"
+
+ - name: test message for missing required parameters and invalid suboptions
+ block:
+ - include_role:
+ name: test1
+ vars:
+ some_json: '{}'
+ some_jsonarg: '{}'
+ multi_level_option:
+ second_level:
+ not_a_supported_suboption: true
+
+ - fail:
+ msg: "Should not get here"
+
+ rescue:
+ - debug:
+ var: ansible_failed_result
+
+ - assert:
+ that:
+ - ansible_failed_result.argument_errors | length == 2
+ - missing_required in ansible_failed_result.argument_errors
+ - got_unexpected in ansible_failed_result.argument_errors
+ vars:
+ missing_required: "missing required arguments: third_level found in multi_level_option -> second_level"
+ got_unexpected: "multi_level_option.second_level.not_a_supported_suboption. Supported parameters include: third_level."
diff --git a/test/integration/targets/setup_epel/tasks/main.yml b/test/integration/targets/setup_epel/tasks/main.yml
index ba0eae30..a8593bb4 100644
--- a/test/integration/targets/setup_epel/tasks/main.yml
+++ b/test/integration/targets/setup_epel/tasks/main.yml
@@ -1,3 +1,8 @@
+- name: Enable RHEL7 extras
+ # EPEL 7 depends on RHEL 7 extras, which is not enabled by default on RHEL.
+ # See: https://docs.fedoraproject.org/en-US/epel/epel-policy/#_policy
+ command: yum-config-manager --enable rhel-7-server-rhui-extras-rpms
+ when: ansible_facts.distribution == 'RedHat' and ansible_facts.distribution_major_version == '7'
- name: Install EPEL
yum:
name: https://ci-files.testing.ansible.com/test/integration/targets/setup_epel/epel-release-latest-{{ ansible_distribution_major_version }}.noarch.rpm
diff --git a/test/integration/targets/var_templating/ansible_debug_template.j2 b/test/integration/targets/var_templating/ansible_debug_template.j2
new file mode 100644
index 00000000..8fe25f99
--- /dev/null
+++ b/test/integration/targets/var_templating/ansible_debug_template.j2
@@ -0,0 +1 @@
+{{ hello }}
diff --git a/test/integration/targets/var_templating/runme.sh b/test/integration/targets/var_templating/runme.sh
index 9363cb3a..bcf09241 100755
--- a/test/integration/targets/var_templating/runme.sh
+++ b/test/integration/targets/var_templating/runme.sh
@@ -16,3 +16,6 @@ ansible-playbook task_vars_templating.yml -v "$@"
# there should be an attempt to use 'sudo' in the connection debug output
ANSIBLE_BECOME_ALLOW_SAME_USER=true ansible-playbook test_connection_vars.yml -vvvv "$@" | tee /dev/stderr | grep 'sudo \-H \-S'
+
+# smoke test usage of VarsWithSources that is used when ANSIBLE_DEBUG=1
+ANSIBLE_DEBUG=1 ansible-playbook test_vars_with_sources.yml -v "$@"
diff --git a/test/integration/targets/var_templating/test_vars_with_sources.yml b/test/integration/targets/var_templating/test_vars_with_sources.yml
new file mode 100644
index 00000000..0b8c990e
--- /dev/null
+++ b/test/integration/targets/var_templating/test_vars_with_sources.yml
@@ -0,0 +1,9 @@
+- hosts: localhost
+ gather_facts: false
+ tasks:
+ - template:
+ src: ansible_debug_template.j2
+ dest: "{{ output_dir }}/ansible_debug_templated.txt"
+ vars:
+ output_dir: "{{ lookup('env', 'OUTPUT_DIR') }}"
+ hello: hello
diff --git a/test/integration/targets/yum/tasks/yuminstallroot.yml b/test/integration/targets/yum/tasks/yuminstallroot.yml
index bb69151a..028e8059 100644
--- a/test/integration/targets/yum/tasks/yuminstallroot.yml
+++ b/test/integration/targets/yum/tasks/yuminstallroot.yml
@@ -76,13 +76,6 @@
- ansible_facts["distribution_major_version"] == "7"
- ansible_facts["distribution"] == "RedHat"
block:
- # Need to enable this RHUI repo for RHEL7 testing in AWS, CentOS has Extras
- # enabled by default and this is not needed there.
- - name: enable rhel-7-server-rhui-extras-rpms repo for RHEL7
- command: yum-config-manager --enable rhel-7-server-rhui-extras-rpms
- - name: update cache to pull repodata
- yum:
- update_cache: yes
- name: install required packages for buildah test
yum:
state: present
@@ -137,5 +130,3 @@
state: absent
name:
- buildah
- - name: disable rhel-7-server-rhui-extras-rpms repo for RHEL7
- command: yum-config-manager --disable rhel-7-server-rhui-extras-rpms
diff --git a/test/lib/ansible_test/_data/completion/docker.txt b/test/lib/ansible_test/_data/completion/docker.txt
index ad5e9764..9e1a9d5e 100644
--- a/test/lib/ansible_test/_data/completion/docker.txt
+++ b/test/lib/ansible_test/_data/completion/docker.txt
@@ -1,9 +1,9 @@
-base image=quay.io/ansible/base-test-container:3.9.0 python=3.11,2.7,3.5,3.6,3.7,3.8,3.9,3.10 seccomp=unconfined
-default image=quay.io/ansible/default-test-container:6.13.0 python=3.11,2.7,3.5,3.6,3.7,3.8,3.9,3.10 seccomp=unconfined context=collection
-default image=quay.io/ansible/ansible-core-test-container:6.13.0 python=3.11,2.7,3.5,3.6,3.7,3.8,3.9,3.10 seccomp=unconfined context=ansible-core
-alpine3 image=quay.io/ansible/alpine3-test-container:4.8.0 python=3.10
-centos7 image=quay.io/ansible/centos7-test-container:4.8.0 python=2.7 seccomp=unconfined
-fedora36 image=quay.io/ansible/fedora36-test-container:4.8.0 python=3.10 seccomp=unconfined
+base image=quay.io/ansible/base-test-container:3.9.0 python=3.11,2.7,3.5,3.6,3.7,3.8,3.9,3.10
+default image=quay.io/ansible/default-test-container:6.13.0 python=3.11,2.7,3.5,3.6,3.7,3.8,3.9,3.10 context=collection
+default image=quay.io/ansible/ansible-core-test-container:6.13.0 python=3.11,2.7,3.5,3.6,3.7,3.8,3.9,3.10 context=ansible-core
+alpine3 image=quay.io/ansible/alpine3-test-container:4.8.0 python=3.10 cgroup=none audit=none
+centos7 image=quay.io/ansible/centos7-test-container:4.8.0 python=2.7 cgroup=v1-only
+fedora36 image=quay.io/ansible/fedora36-test-container:4.8.0 python=3.10
opensuse15 image=quay.io/ansible/opensuse15-test-container:4.8.0 python=3.6
-ubuntu2004 image=quay.io/ansible/ubuntu2004-test-container:4.8.0 python=3.8 seccomp=unconfined
-ubuntu2204 image=quay.io/ansible/ubuntu2204-test-container:4.8.0 python=3.10 seccomp=unconfined
+ubuntu2004 image=quay.io/ansible/ubuntu2004-test-container:4.8.0 python=3.8
+ubuntu2204 image=quay.io/ansible/ubuntu2204-test-container:4.8.0 python=3.10
diff --git a/test/lib/ansible_test/_internal/__init__.py b/test/lib/ansible_test/_internal/__init__.py
index 3ed74ef6..d218b561 100644
--- a/test/lib/ansible_test/_internal/__init__.py
+++ b/test/lib/ansible_test/_internal/__init__.py
@@ -11,8 +11,13 @@ from .init import (
CURRENT_RLIMIT_NOFILE,
)
+from .constants import (
+ STATUS_HOST_CONNECTION_ERROR,
+)
+
from .util import (
ApplicationError,
+ HostConnectionError,
display,
report_locale,
)
@@ -94,6 +99,10 @@ def main(cli_args: t.Optional[list[str]] = None) -> None:
display.review_warnings()
config.success = True
+ except HostConnectionError as ex:
+ display.fatal(str(ex))
+ ex.run_callback()
+ sys.exit(STATUS_HOST_CONNECTION_ERROR)
except ApplicationWarning as ex:
display.warning('%s' % ex)
sys.exit(0)
diff --git a/test/lib/ansible_test/_internal/ansible_util.py b/test/lib/ansible_test/_internal/ansible_util.py
index 679ca75e..5798d352 100644
--- a/test/lib/ansible_test/_internal/ansible_util.py
+++ b/test/lib/ansible_test/_internal/ansible_util.py
@@ -51,6 +51,10 @@ from .host_configs import (
PythonConfig,
)
+from .thread import (
+ mutex,
+)
+
def parse_inventory(args: EnvironmentConfig, inventory_path: str) -> dict[str, t.Any]:
"""Return a dict parsed from the given inventory file."""
@@ -192,6 +196,7 @@ def configure_plugin_paths(args: CommonConfig) -> dict[str, str]:
return env
+@mutex
def get_ansible_python_path(args: CommonConfig) -> str:
"""
Return a directory usable for PYTHONPATH, containing only the ansible package.
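The @mutex decorator imported above is not shown in this diff; a minimal sketch of what such a decorator could look like, assuming it simply serializes all callers of the wrapped function with a single lock:

# Sketch of a @mutex decorator; the real one lives in ansible-test's thread module.
import functools
import threading

def mutex(func):
    lock = threading.Lock()

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        with lock:  # serialize concurrent callers of the decorated function
            return func(*args, **kwargs)

    return wrapper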
diff --git a/test/lib/ansible_test/_internal/cgroup.py b/test/lib/ansible_test/_internal/cgroup.py
new file mode 100644
index 00000000..b55d878d
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cgroup.py
@@ -0,0 +1,110 @@
+"""Linux control group constants, classes and utilities."""
+from __future__ import annotations
+
+import codecs
+import dataclasses
+import pathlib
+import re
+
+
+class CGroupPath:
+ """Linux cgroup path constants."""
+ ROOT = '/sys/fs/cgroup'
+ SYSTEMD = '/sys/fs/cgroup/systemd'
+ SYSTEMD_RELEASE_AGENT = '/sys/fs/cgroup/systemd/release_agent'
+
+
+class MountType:
+ """Linux filesystem mount type constants."""
+ TMPFS = 'tmpfs'
+ CGROUP_V1 = 'cgroup'
+ CGROUP_V2 = 'cgroup2'
+
+
+@dataclasses.dataclass(frozen=True)
+class CGroupEntry:
+ """A single cgroup entry parsed from '/proc/{pid}/cgroup' in the proc filesystem."""
+ id: int
+ subsystem: str
+ path: pathlib.PurePosixPath
+
+ @property
+ def root_path(self) -> pathlib.PurePosixPath:
+ """The root path for this cgroup subsystem."""
+ return pathlib.PurePosixPath(CGroupPath.ROOT, self.subsystem)
+
+ @property
+ def full_path(self) -> pathlib.PurePosixPath:
+ """The full path for this cgroup subsystem."""
+ return pathlib.PurePosixPath(self.root_path, str(self.path).lstrip('/'))
+
+ @classmethod
+ def parse(cls, value: str) -> CGroupEntry:
+ """Parse the given cgroup line from the proc filesystem and return a cgroup entry."""
+ cid, subsystem, path = value.split(':')
+
+ return cls(
+ id=int(cid),
+ subsystem=subsystem.removeprefix('name='),
+ path=pathlib.PurePosixPath(path)
+ )
+
+ @classmethod
+ def loads(cls, value: str) -> tuple[CGroupEntry, ...]:
+ """Parse the given output from the proc filesystem and return a tuple of cgroup entries."""
+ return tuple(cls.parse(line) for line in value.splitlines())
+
+
+@dataclasses.dataclass(frozen=True)
+class MountEntry:
+ """A single mount info entry parsed from '/proc/{pid}/mountinfo' in the proc filesystem."""
+ mount_id: int
+ parent_id: int
+ device_major: int
+ device_minor: int
+ root: pathlib.PurePosixPath
+ path: pathlib.PurePosixPath
+ options: tuple[str, ...]
+ fields: tuple[str, ...]
+ type: str
+ source: pathlib.PurePosixPath
+ super_options: tuple[str, ...]
+
+ @classmethod
+ def parse(cls, value: str) -> MountEntry:
+ """Parse the given mount info line from the proc filesystem and return a mount entry."""
+ # See: https://man7.org/linux/man-pages/man5/proc.5.html
+ # See: https://github.com/torvalds/linux/blob/aea23e7c464bfdec04b52cf61edb62030e9e0d0a/fs/proc_namespace.c#L135
+ mount_id, parent_id, device_major_minor, root, path, options, *remainder = value.split(' ')
+ fields = remainder[:-4]
+ separator, mtype, source, super_options = remainder[-4:]
+
+ assert separator == '-'
+
+ device_major, device_minor = device_major_minor.split(':')
+
+ return cls(
+ mount_id=int(mount_id),
+ parent_id=int(parent_id),
+ device_major=int(device_major),
+ device_minor=int(device_minor),
+ root=_decode_path(root),
+ path=_decode_path(path),
+ options=tuple(options.split(',')),
+ fields=tuple(fields),
+ type=mtype,
+ source=_decode_path(source),
+ super_options=tuple(super_options.split(',')),
+ )
+
+ @classmethod
+ def loads(cls, value: str) -> tuple[MountEntry, ...]:
+ """Parse the given output from the proc filesystem and return a tuple of mount info entries."""
+ return tuple(cls.parse(line) for line in value.splitlines())
+
+
+def _decode_path(value: str) -> pathlib.PurePosixPath:
+ """Decode and return a path which may contain octal escape sequences."""
+ # See: https://github.com/torvalds/linux/blob/aea23e7c464bfdec04b52cf61edb62030e9e0d0a/fs/proc_namespace.c#L150
+ path = re.sub(r'(\\[0-7]{3})', lambda m: codecs.decode(m.group(0).encode('ascii'), 'unicode_escape'), value)
+ return pathlib.PurePosixPath(path)
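A usage sketch for the parsers above; the cgroup and mountinfo lines are made up but follow the /proc formats referenced in the docstrings, and the import assumes the module added by this diff:

# Sample parsing with the classes defined above (sample IDs and paths are invented).
from ansible_test._internal.cgroup import CGroupEntry, MountEntry, MountType

cgroup = CGroupEntry.parse('12:devices:/docker/abc123')
assert cgroup.subsystem == 'devices'
assert str(cgroup.root_path) == '/sys/fs/cgroup/devices'
assert str(cgroup.full_path) == '/sys/fs/cgroup/devices/docker/abc123'

mount = MountEntry.parse('29 24 0:25 / /sys/fs/cgroup/devices rw,nosuid - cgroup cgroup rw,devices')
assert mount.type == MountType.CGROUP_V1
assert str(mount.path) == '/sys/fs/cgroup/devices'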
diff --git a/test/lib/ansible_test/_internal/cli/argparsing/parsers.py b/test/lib/ansible_test/_internal/cli/argparsing/parsers.py
index a2e40475..429b9c0c 100644
--- a/test/lib/ansible_test/_internal/cli/argparsing/parsers.py
+++ b/test/lib/ansible_test/_internal/cli/argparsing/parsers.py
@@ -22,24 +22,26 @@ ASSIGNMENT_DELIMITER = '='
PATH_DELIMITER = '/'
-@dataclasses.dataclass(frozen=True)
+# This class was originally frozen. However, that causes issues when running under Python 3.11.
+# See: https://github.com/python/cpython/issues/99856
+@dataclasses.dataclass
class Completion(Exception):
"""Base class for argument completion results."""
-@dataclasses.dataclass(frozen=True)
+@dataclasses.dataclass
class CompletionUnavailable(Completion):
"""Argument completion unavailable."""
message: str = 'No completions available.'
-@dataclasses.dataclass(frozen=True)
+@dataclasses.dataclass
class CompletionError(Completion):
"""Argument completion error."""
message: t.Optional[str] = None
-@dataclasses.dataclass(frozen=True)
+@dataclasses.dataclass
class CompletionSuccess(Completion):
"""Successful argument completion result."""
list_mode: bool
@@ -287,6 +289,19 @@ class ChoicesParser(DynamicChoicesParser):
return '|'.join(self.choices)
+class EnumValueChoicesParser(ChoicesParser):
+ """Composite argument parser which relies on a static list of choices derived from the values of an enum."""
+ def __init__(self, enum_type: t.Type[enum.Enum], conditions: MatchConditions = MatchConditions.CHOICE) -> None:
+ self.enum_type = enum_type
+
+ super().__init__(choices=[str(item.value) for item in enum_type], conditions=conditions)
+
+ def parse(self, state: ParserState) -> t.Any:
+ """Parse the input from the given state and return the result."""
+ value = super().parse(state)
+ return self.enum_type(value)
+
+
class IntegerParser(DynamicChoicesParser):
"""Composite argument parser for integers."""
PATTERN = re.compile('^[1-9][0-9]*$')
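A simplified, standalone illustration of the enum-backed choices idea behind EnumValueChoicesParser above; the enum here is a local stand-in and the real parser also participates in completion state handling:

# Hypothetical standalone version of the "choices from enum values" technique.
import enum
import typing as t

class CGroupVersion(enum.Enum):  # stand-in for the enum used by ansible-test
    NONE = 'none'
    V1_ONLY = 'v1-only'

def parse_enum_choice(enum_type: t.Type[enum.Enum], value: str) -> enum.Enum:
    choices = [str(item.value) for item in enum_type]
    if value not in choices:
        raise ValueError(f'expected one of {choices}, got {value!r}')
    return enum_type(value)  # convert the matched string back to an enum member

assert parse_enum_choice(CGroupVersion, 'v1-only') is CGroupVersion.V1_ONLY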
diff --git a/test/lib/ansible_test/_internal/cli/environments.py b/test/lib/ansible_test/_internal/cli/environments.py
index 7c8e1060..1dde9e63 100644
--- a/test/lib/ansible_test/_internal/cli/environments.py
+++ b/test/lib/ansible_test/_internal/cli/environments.py
@@ -397,6 +397,8 @@ def add_global_docker(
docker_network=None,
docker_terminate=None,
prime_containers=False,
+ dev_systemd_debug=False,
+ dev_probe_cgroups=None,
)
return
@@ -428,6 +430,24 @@ def add_global_docker(
help='download containers without running tests',
)
+ # Docker support isn't related to ansible-core-ci.
+ # However, ansible-core-ci support is a reasonable indicator that the user may need the `--dev-*` options.
+ suppress = None if get_ci_provider().supports_core_ci_auth() else argparse.SUPPRESS
+
+ parser.add_argument(
+ '--dev-systemd-debug',
+ action='store_true',
+ help=suppress or 'enable systemd debugging in containers',
+ )
+
+ parser.add_argument(
+ '--dev-probe-cgroups',
+ metavar='DIR',
+ nargs='?',
+ const='',
+ help=suppress or 'probe container cgroups, with optional log dir',
+ )
+
def add_environment_docker(
exclusive_parser: argparse.ArgumentParser,
diff --git a/test/lib/ansible_test/_internal/cli/parsers/key_value_parsers.py b/test/lib/ansible_test/_internal/cli/parsers/key_value_parsers.py
index 7f184c37..a6af7f80 100644
--- a/test/lib/ansible_test/_internal/cli/parsers/key_value_parsers.py
+++ b/test/lib/ansible_test/_internal/cli/parsers/key_value_parsers.py
@@ -10,6 +10,11 @@ from ...constants import (
SUPPORTED_PYTHON_VERSIONS,
)
+from ...completion import (
+ AuditMode,
+ CGroupVersion,
+)
+
from ...util import (
REMOTE_ARCHITECTURES,
)
@@ -27,6 +32,7 @@ from ..argparsing.parsers import (
BooleanParser,
ChoicesParser,
DocumentationState,
+ EnumValueChoicesParser,
IntegerParser,
KeyValueParser,
Parser,
@@ -103,6 +109,8 @@ class DockerKeyValueParser(KeyValueParser):
return dict(
python=PythonParser(versions=self.versions, allow_venv=False, allow_default=self.allow_default),
seccomp=ChoicesParser(SECCOMP_CHOICES),
+ cgroup=EnumValueChoicesParser(CGroupVersion),
+ audit=EnumValueChoicesParser(AuditMode),
privileged=BooleanParser(),
memory=IntegerParser(),
)
@@ -116,6 +124,8 @@ class DockerKeyValueParser(KeyValueParser):
state.sections[f'{"controller" if self.controller else "target"} {section_name} (comma separated):'] = '\n'.join([
f' python={python_parser.document(state)}',
f' seccomp={ChoicesParser(SECCOMP_CHOICES).document(state)}',
+ f' cgroup={EnumValueChoicesParser(CGroupVersion).document(state)}',
+ f' audit={EnumValueChoicesParser(AuditMode).document(state)}',
f' privileged={BooleanParser().document(state)}',
f' memory={IntegerParser().document(state)} # bytes',
])
diff --git a/test/lib/ansible_test/_internal/commands/env/__init__.py b/test/lib/ansible_test/_internal/commands/env/__init__.py
index b4ee2438..44f229f8 100644
--- a/test/lib/ansible_test/_internal/commands/env/__init__.py
+++ b/test/lib/ansible_test/_internal/commands/env/__init__.py
@@ -17,9 +17,9 @@ from ...io import (
from ...util import (
display,
- SubprocessError,
get_ansible_version,
get_available_python_versions,
+ ApplicationError,
)
from ...util_common import (
@@ -30,8 +30,8 @@ from ...util_common import (
from ...docker_util import (
get_docker_command,
- docker_info,
- docker_version
+ get_docker_info,
+ get_docker_container_id,
)
from ...constants import (
@@ -70,11 +70,14 @@ def show_dump_env(args: EnvConfig) -> None:
if not args.show and not args.dump:
return
+ container_id = get_docker_container_id()
+
data = dict(
ansible=dict(
version=get_ansible_version(),
),
docker=get_docker_details(args),
+ container_id=container_id,
environ=os.environ.copy(),
location=dict(
pwd=os.environ.get('PWD', None),
@@ -178,14 +181,12 @@ def get_docker_details(args: EnvConfig) -> dict[str, t.Any]:
executable = docker.executable
try:
- info = docker_info(args)
- except SubprocessError as ex:
- display.warning('Failed to collect docker info:\n%s' % ex)
-
- try:
- version = docker_version(args)
- except SubprocessError as ex:
- display.warning('Failed to collect docker version:\n%s' % ex)
+ docker_info = get_docker_info(args)
+ except ApplicationError as ex:
+ display.warning(str(ex))
+ else:
+ info = docker_info.info
+ version = docker_info.version
docker_details = dict(
executable=executable,
diff --git a/test/lib/ansible_test/_internal/commands/integration/__init__.py b/test/lib/ansible_test/_internal/commands/integration/__init__.py
index e4d827aa..33bd45f6 100644
--- a/test/lib/ansible_test/_internal/commands/integration/__init__.py
+++ b/test/lib/ansible_test/_internal/commands/integration/__init__.py
@@ -99,6 +99,7 @@ from ...host_configs import (
from ...host_profiles import (
ControllerProfile,
+ ControllerHostProfile,
HostProfile,
PosixProfile,
SshTargetHostProfile,
@@ -531,6 +532,10 @@ def command_integration_filtered(
if not tries:
raise
+ if target.retry_never:
+ display.warning(f'Skipping retry of test target "{target.name}" since it has been excluded from retries.')
+ raise
+
display.warning('Retrying test target "%s" with maximum verbosity.' % target.name)
display.verbosity = args.verbosity = 6
@@ -957,13 +962,10 @@ def command_integration_filter(args: TIntegrationConfig,
return host_state, internal_targets
-def requirements(args: IntegrationConfig, host_state: HostState) -> None:
- """Install requirements."""
- target_profile = host_state.target_profiles[0]
-
- configure_pypi_proxy(args, host_state.controller_profile) # integration, windows-integration, network-integration
-
- if isinstance(target_profile, PosixProfile) and not isinstance(target_profile, ControllerProfile):
- configure_pypi_proxy(args, target_profile) # integration
-
- install_requirements(args, host_state.controller_profile.python, ansible=True, command=True) # integration, windows-integration, network-integration
+def requirements(host_profile: HostProfile) -> None:
+ """Install requirements after bootstrapping and delegation."""
+ if isinstance(host_profile, ControllerHostProfile) and host_profile.controller:
+ configure_pypi_proxy(host_profile.args, host_profile) # integration, windows-integration, network-integration
+ install_requirements(host_profile.args, host_profile.python, ansible=True, command=True) # integration, windows-integration, network-integration
+ elif isinstance(host_profile, PosixProfile) and not isinstance(host_profile, ControllerProfile):
+ configure_pypi_proxy(host_profile.args, host_profile) # integration
diff --git a/test/lib/ansible_test/_internal/commands/shell/__init__.py b/test/lib/ansible_test/_internal/commands/shell/__init__.py
index 5733ff2f..5e8c101a 100644
--- a/test/lib/ansible_test/_internal/commands/shell/__init__.py
+++ b/test/lib/ansible_test/_internal/commands/shell/__init__.py
@@ -9,6 +9,8 @@ from ...util import (
ApplicationError,
OutputStream,
display,
+ SubprocessError,
+ HostConnectionError,
)
from ...config import (
@@ -115,4 +117,19 @@ def command_shell(args: ShellConfig) -> None:
else:
cmd = []
- con.run(cmd, capture=False, interactive=True)
+ try:
+ con.run(cmd, capture=False, interactive=True)
+ except SubprocessError as ex:
+ if isinstance(con, SshConnection) and ex.status == 255:
+ # 255 indicates SSH itself failed, rather than a command run on the remote host.
+ # In this case, report a host connection error so additional troubleshooting output is provided.
+ if not args.delegate and not args.host_path:
+ def callback() -> None:
+ """Callback to run during error display."""
+ target_profile.on_target_failure() # when the controller is not delegated, report failures immediately
+ else:
+ callback = None
+
+ raise HostConnectionError(f'SSH shell connection failed for host {target_profile.config}: {ex}', callback) from ex
+
+ raise
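The status-255 check above relies on an ssh(1) convention: ssh reserves exit status 255 for its own failures, while a remote command's exit status (0-254) is passed through. A minimal sketch of that distinction:

# Illustration of the SSH exit-status convention used by the handler above.
def is_ssh_transport_failure(status: int) -> bool:
    return status == 255  # connection/auth failure, not a remote command failure

assert is_ssh_transport_failure(255)
assert not is_ssh_transport_failure(1)  # remote command failed; SSH itself worked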
diff --git a/test/lib/ansible_test/_internal/completion.py b/test/lib/ansible_test/_internal/completion.py
index a370d800..ee096772 100644
--- a/test/lib/ansible_test/_internal/completion.py
+++ b/test/lib/ansible_test/_internal/completion.py
@@ -3,6 +3,7 @@ from __future__ import annotations
import abc
import dataclasses
+import enum
import os
import typing as t
@@ -26,6 +27,26 @@ from .become import (
)
+class CGroupVersion(enum.Enum):
+ """The control group version(s) required by a container."""
+ NONE = 'none'
+ V1_ONLY = 'v1-only'
+ V2_ONLY = 'v2-only'
+ V1_V2 = 'v1-v2'
+
+ def __repr__(self) -> str:
+ return f'{self.__class__.__name__}.{self.name}'
+
+
+class AuditMode(enum.Enum):
+ """The audit requirements of a container."""
+ NONE = 'none'
+ REQUIRED = 'required'
+
+ def __repr__(self) -> str:
+ return f'{self.__class__.__name__}.{self.name}'
+
+
@dataclasses.dataclass(frozen=True)
class CompletionConfig(metaclass=abc.ABCMeta):
"""Base class for completion configuration."""
@@ -140,6 +161,8 @@ class DockerCompletionConfig(PythonCompletionConfig):
"""Configuration for Docker containers."""
image: str = ''
seccomp: str = 'default'
+ cgroup: str = CGroupVersion.V1_V2.value
+ audit: str = AuditMode.REQUIRED.value # most containers need this, so the default is required, leaving it to be opt-out for containers which don't need it
placeholder: bool = False
@property
@@ -147,6 +170,22 @@ class DockerCompletionConfig(PythonCompletionConfig):
"""True if the completion entry is only used for defaults, otherwise False."""
return False
+ @property
+ def audit_enum(self) -> AuditMode:
+ """The audit requirements for the container. Raises an exception if the value is invalid."""
+ try:
+ return AuditMode(self.audit)
+ except ValueError:
+ raise ValueError(f'Docker completion entry "{self.name}" has an invalid value "{self.audit}" for the "audit" setting.') from None
+
+ @property
+ def cgroup_enum(self) -> CGroupVersion:
+ """The control group version(s) required by the container. Raises an exception if the value is invalid."""
+ try:
+ return CGroupVersion(self.cgroup)
+ except ValueError:
+ raise ValueError(f'Docker completion entry "{self.name}" has an invalid value "{self.cgroup}" for the "cgroup" setting.') from None
+
def __post_init__(self):
if not self.image:
raise Exception(f'Docker completion entry "{self.name}" must provide an "image" setting.')
@@ -154,6 +193,10 @@ class DockerCompletionConfig(PythonCompletionConfig):
if not self.supported_pythons and not self.placeholder:
raise Exception(f'Docker completion entry "{self.name}" must provide a "python" setting.')
+ # verify properties can be correctly parsed to enums
+ assert self.audit_enum
+ assert self.cgroup_enum
+
@dataclasses.dataclass(frozen=True)
class NetworkRemoteCompletionConfig(RemoteCompletionConfig):
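A sketch of how the docker.txt settings shown earlier (for example cgroup=v1-only on centos7) map through the new enum properties; it assumes the CGroupVersion and AuditMode enums added above are in scope (e.g. imported from ansible_test._internal.completion):

# The string values from completion entries round-trip through the enums.
assert CGroupVersion('v1-only') is CGroupVersion.V1_ONLY
assert AuditMode('none') is AuditMode.NONE

try:
    CGroupVersion('bogus')  # invalid settings surface as ValueError via cgroup_enum
except ValueError:
    pass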
diff --git a/test/lib/ansible_test/_internal/config.py b/test/lib/ansible_test/_internal/config.py
index 84eefdbc..372c23ab 100644
--- a/test/lib/ansible_test/_internal/config.py
+++ b/test/lib/ansible_test/_internal/config.py
@@ -111,6 +111,9 @@ class EnvironmentConfig(CommonConfig):
self.delegate_args: list[str] = []
+ self.dev_systemd_debug: bool = args.dev_systemd_debug
+ self.dev_probe_cgroups: t.Optional[str] = args.dev_probe_cgroups
+
def host_callback(files: list[tuple[str, str]]) -> None:
"""Add the host files to the payload file list."""
config = self
diff --git a/test/lib/ansible_test/_internal/connections.py b/test/lib/ansible_test/_internal/connections.py
index 829d9d32..4823b1a4 100644
--- a/test/lib/ansible_test/_internal/connections.py
+++ b/test/lib/ansible_test/_internal/connections.py
@@ -34,6 +34,7 @@ from .docker_util import (
from .ssh import (
SshConnectionDetail,
+ ssh_options_to_list,
)
from .become import (
@@ -123,7 +124,7 @@ class SshConnection(Connection):
self.options = ['-i', settings.identity_file]
- ssh_options = dict(
+ ssh_options: dict[str, t.Union[int, str]] = dict(
BatchMode='yes',
StrictHostKeyChecking='no',
UserKnownHostsFile='/dev/null',
@@ -131,8 +132,9 @@ class SshConnection(Connection):
ServerAliveCountMax=4,
)
- for ssh_option in sorted(ssh_options):
- self.options.extend(['-o', f'{ssh_option}={ssh_options[ssh_option]}'])
+ ssh_options.update(settings.options)
+
+ self.options.extend(ssh_options_to_list(ssh_options))
def run(self,
command: list[str],
diff --git a/test/lib/ansible_test/_internal/constants.py b/test/lib/ansible_test/_internal/constants.py
index f516b064..b6072fbe 100644
--- a/test/lib/ansible_test/_internal/constants.py
+++ b/test/lib/ansible_test/_internal/constants.py
@@ -6,6 +6,8 @@ from .._util.target.common.constants import (
REMOTE_ONLY_PYTHON_VERSIONS,
)
+STATUS_HOST_CONNECTION_ERROR = 4
+
# Setting a low soft RLIMIT_NOFILE value will improve the performance of subprocess.Popen on Python 2.x when close_fds=True.
# This will affect all Python subprocesses. It will also affect the current Python process if set before subprocess is imported for the first time.
SOFT_RLIMIT_NOFILE = 1024
diff --git a/test/lib/ansible_test/_internal/containers.py b/test/lib/ansible_test/_internal/containers.py
index 5f727faa..95b1718b 100644
--- a/test/lib/ansible_test/_internal/containers.py
+++ b/test/lib/ansible_test/_internal/containers.py
@@ -35,8 +35,10 @@ from .config import (
from .docker_util import (
ContainerNotFoundError,
DockerInspect,
+ docker_create,
docker_exec,
docker_inspect,
+ docker_network_inspect,
docker_pull,
docker_rm,
docker_run,
@@ -45,6 +47,7 @@ from .docker_util import (
get_docker_host_ip,
get_podman_host_ip,
require_docker,
+ detect_host_properties,
)
from .ansible_util import (
@@ -81,6 +84,10 @@ from .connections import (
SshConnection,
)
+from .thread import (
+ mutex,
+)
+
# information about support containers provisioned by the current ansible-test instance
support_containers: dict[str, ContainerDescriptor] = {}
support_containers_mutex = threading.Lock()
@@ -142,7 +149,7 @@ def run_support_container(
options = (options or [])
if start:
- options.append('-d')
+ options.append('-dt') # the -t option is required to cause systemd in the container to log output to the console
if publish_ports:
for port in ports:
@@ -152,6 +159,10 @@ def run_support_container(
for key, value in env.items():
options.extend(['--env', '%s=%s' % (key, value)])
+ max_open_files = detect_host_properties(args).max_open_files
+
+ options.extend(['--ulimit', 'nofile=%s' % max_open_files])
+
support_container_id = None
if allow_existing:
@@ -176,6 +187,9 @@ def run_support_container(
if not support_container_id:
docker_rm(args, name)
+ if args.dev_systemd_debug:
+ options.extend(('--env', 'SYSTEMD_LOG_LEVEL=debug'))
+
if support_container_id:
display.info('Using existing "%s" container.' % name)
running = True
@@ -183,7 +197,7 @@ def run_support_container(
else:
display.info('Starting new "%s" container.' % name)
docker_pull(args, image)
- support_container_id = docker_run(args, image, name, options, create_only=not start, cmd=cmd)
+ support_container_id = run_container(args, image, name, options, create_only=not start, cmd=cmd)
running = start
existing = False
@@ -221,6 +235,126 @@ def run_support_container(
return descriptor
+def run_container(
+ args: EnvironmentConfig,
+ image: str,
+ name: str,
+ options: t.Optional[list[str]],
+ cmd: t.Optional[list[str]] = None,
+ create_only: bool = False,
+) -> str:
+ """Run a container using the given docker image."""
+ options = list(options or [])
+ cmd = list(cmd or [])
+
+ options.extend(['--name', name])
+
+ network = get_docker_preferred_network_name(args)
+
+ if is_docker_user_defined_network(network):
+ # Only when the network is not the default bridge network.
+ options.extend(['--network', network])
+
+ for _iteration in range(1, 3):
+ try:
+ if create_only:
+ stdout = docker_create(args, image, options, cmd)[0]
+ else:
+ stdout = docker_run(args, image, options, cmd)[0]
+ except SubprocessError as ex:
+ display.error(ex.message)
+ display.warning(f'Failed to run docker image "{image}". Waiting a few seconds before trying again.')
+ docker_rm(args, name) # podman doesn't remove containers after create if run fails
+ time.sleep(3)
+ else:
+ if args.explain:
+ stdout = ''.join(random.choice('0123456789abcdef') for _iteration in range(64))
+
+ return stdout.strip()
+
+ raise ApplicationError(f'Failed to run docker image "{image}".')
+
+
+def start_container(args: EnvironmentConfig, container_id: str) -> tuple[t.Optional[str], t.Optional[str]]:
+ """Start a docker container by name or ID."""
+ options: list[str] = []
+
+ for _iteration in range(1, 3):
+ try:
+ return docker_start(args, container_id, options)
+ except SubprocessError as ex:
+ display.error(ex.message)
+ display.warning(f'Failed to start docker container "{container_id}". Waiting a few seconds before trying again.')
+ time.sleep(3)
+
+ raise ApplicationError(f'Failed to start docker container "{container_id}".')
+
+
+def get_container_ip_address(args: EnvironmentConfig, container: DockerInspect) -> t.Optional[str]:
+ """Return the IP address of the container for the preferred docker network."""
+ if container.networks:
+ network_name = get_docker_preferred_network_name(args)
+
+ if not network_name:
+ # Sort networks and use the first available.
+ # This assumes all containers will have access to the same networks.
+ network_name = sorted(container.networks.keys()).pop(0)
+
+ ipaddress = container.networks[network_name]['IPAddress']
+ else:
+ ipaddress = container.network_settings['IPAddress']
+
+ if not ipaddress:
+ return None
+
+ return ipaddress
+
+
+@mutex
+def get_docker_preferred_network_name(args: EnvironmentConfig) -> t.Optional[str]:
+ """
+ Return the preferred network name for use with Docker. The selection logic is:
+ - the network selected by the user with `--docker-network`
+ - the network of the currently running docker container (if any)
+ - the default docker network (returns None)
+ """
+ try:
+ return get_docker_preferred_network_name.network # type: ignore[attr-defined]
+ except AttributeError:
+ pass
+
+ network = None
+
+ if args.docker_network:
+ network = args.docker_network
+ else:
+ current_container_id = get_docker_container_id()
+
+ if current_container_id:
+ # Make sure any additional containers we launch use the same network as the current container we're running in.
+ # This is needed when ansible-test is running in a container that is not connected to Docker's default network.
+ container = docker_inspect(args, current_container_id, always=True)
+ network = container.get_network_name()
+
+ # The default docker behavior puts containers on the same network.
+ # The default podman behavior puts containers on isolated networks which don't allow communication between containers or disconnecting from a network.
+ # Starting with podman version 2.1.0 rootless containers are able to join networks.
+ # Starting with podman version 2.2.0 containers can be disconnected from networks.
+ # To maintain feature parity with docker, detect and use the default "podman" network when running under podman.
+ if network is None and require_docker().command == 'podman' and docker_network_inspect(args, 'podman', always=True):
+ network = 'podman'
+
+ get_docker_preferred_network_name.network = network # type: ignore[attr-defined]
+
+ return network
+
+
+def is_docker_user_defined_network(network: str) -> bool:
+ """Return True if the network being used is a user-defined network."""
+ return bool(network) and network != 'bridge'
+
+
+@mutex
def get_container_database(args: EnvironmentConfig) -> ContainerDatabase:
"""Return the current container database, creating it as needed, or returning the one provided on the command line through delegation."""
try:
@@ -572,7 +706,7 @@ class ContainerDescriptor:
def start(self, args: EnvironmentConfig) -> None:
"""Start the container. Used for containers which are created, but not started."""
- docker_start(args, self.name)
+ start_container(args, self.name)
self.register(args)
@@ -582,7 +716,7 @@ class ContainerDescriptor:
raise Exception('Container already registered: %s' % self.name)
try:
- container = docker_inspect(args, self.container_id)
+ container = docker_inspect(args, self.name)
except ContainerNotFoundError:
if not args.explain:
raise
@@ -599,7 +733,7 @@ class ContainerDescriptor:
),
))
- support_container_ip = container.get_ip_address()
+ support_container_ip = get_container_ip_address(args, container)
if self.publish_ports:
# inspect the support container to locate the published ports
@@ -664,7 +798,7 @@ def cleanup_containers(args: EnvironmentConfig) -> None:
if container.cleanup == CleanupMode.YES:
docker_rm(args, container.container_id)
elif container.cleanup == CleanupMode.INFO:
- display.notice('Remember to run `docker rm -f %s` when finished testing.' % container.name)
+ display.notice(f'Remember to run `{require_docker().command} rm -f {container.name}` when finished testing.')
def create_hosts_entries(context: dict[str, ContainerAccess]) -> list[str]:
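The get_docker_preferred_network_name function above caches its result as an attribute on the function object, with @mutex guarding the first computation. A standalone sketch of that memoization pattern (the 'bridge' value is a placeholder for the real detection logic):

# Function-attribute memoization, as used by get_docker_preferred_network_name.
def preferred_network() -> str:
    try:
        return preferred_network.network  # type: ignore[attr-defined]
    except AttributeError:
        pass  # first call: nothing cached yet

    preferred_network.network = 'bridge'  # type: ignore[attr-defined]  # placeholder result
    return preferred_network.network  # type: ignore[attr-defined]

assert preferred_network() == 'bridge'  # subsequent calls return the cached value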
diff --git a/test/lib/ansible_test/_internal/coverage_util.py b/test/lib/ansible_test/_internal/coverage_util.py
index 4a7b9b5a..43d10718 100644
--- a/test/lib/ansible_test/_internal/coverage_util.py
+++ b/test/lib/ansible_test/_internal/coverage_util.py
@@ -52,6 +52,10 @@ from .constants import (
CONTROLLER_PYTHON_VERSIONS,
)
+from .thread import (
+ mutex,
+)
+
@dataclasses.dataclass(frozen=True)
class CoverageVersion:
@@ -203,6 +207,7 @@ def get_coverage_environment(
return env
+@mutex
def get_coverage_config(args: TestConfig) -> str:
"""Return the path to the coverage config, creating the config if it does not already exist."""
try:
diff --git a/test/lib/ansible_test/_internal/delegation.py b/test/lib/ansible_test/_internal/delegation.py
index 15ca03c6..8c6879d2 100644
--- a/test/lib/ansible_test/_internal/delegation.py
+++ b/test/lib/ansible_test/_internal/delegation.py
@@ -8,6 +8,10 @@ import os
import tempfile
import typing as t
+from .constants import (
+ STATUS_HOST_CONNECTION_ERROR,
+)
+
from .locale_util import (
STANDARD_LOCALE,
)
@@ -200,6 +204,7 @@ def delegate_command(args: EnvironmentConfig, host_state: HostState, exclude: li
con.user = pytest_user
success = False
+ status = 0
try:
# When delegating, preserve the original separate stdout/stderr streams, but only when the following conditions are met:
@@ -209,10 +214,17 @@ def delegate_command(args: EnvironmentConfig, host_state: HostState, exclude: li
output_stream = OutputStream.ORIGINAL if args.display_stderr and not args.interactive else None
con.run(insert_options(command, options), capture=False, interactive=args.interactive, output_stream=output_stream)
success = True
+ except SubprocessError as ex:
+ status = ex.status
+ raise
finally:
if host_delegation:
download_results(args, con, content_root, success)
+ if not success and status == STATUS_HOST_CONNECTION_ERROR:
+ for target in host_state.target_profiles:
+ target.on_target_failure() # when the controller is delegated, report failures after delegation fails
+
def insert_options(command, options):
"""Insert addition command line options into the given command and return the result."""
diff --git a/test/lib/ansible_test/_internal/dev/__init__.py b/test/lib/ansible_test/_internal/dev/__init__.py
new file mode 100644
index 00000000..e7c9b7d5
--- /dev/null
+++ b/test/lib/ansible_test/_internal/dev/__init__.py
@@ -0,0 +1,2 @@
+"""Development and testing support code. Enabled through the use of `--dev-*` command line options."""
+from __future__ import annotations
diff --git a/test/lib/ansible_test/_internal/dev/container_probe.py b/test/lib/ansible_test/_internal/dev/container_probe.py
new file mode 100644
index 00000000..84b88f4b
--- /dev/null
+++ b/test/lib/ansible_test/_internal/dev/container_probe.py
@@ -0,0 +1,210 @@
+"""Diagnostic utilities to probe container cgroup behavior during development and testing (both manual and integration)."""
+from __future__ import annotations
+
+import dataclasses
+import enum
+import json
+import os
+import pathlib
+import pwd
+import typing as t
+
+from ..io import (
+ read_text_file,
+ write_text_file,
+)
+
+from ..util import (
+ display,
+ ANSIBLE_TEST_TARGET_ROOT,
+)
+
+from ..config import (
+ EnvironmentConfig,
+)
+
+from ..docker_util import (
+ LOGINUID_NOT_SET,
+ docker_exec,
+ get_docker_info,
+ get_podman_remote,
+ require_docker,
+)
+
+from ..host_configs import (
+ DockerConfig,
+)
+
+from ..cgroup import (
+ CGroupEntry,
+ CGroupPath,
+ MountEntry,
+ MountType,
+)
+
+
+class CGroupState(enum.Enum):
+ """The expected state of a cgroup related mount point."""
+ HOST = enum.auto()
+ PRIVATE = enum.auto()
+ SHADOWED = enum.auto()
+
+
+@dataclasses.dataclass(frozen=True)
+class CGroupMount:
+ """Details on a cgroup mount point that is expected to be present in the container."""
+ path: str
+ type: t.Optional[str]
+ writable: t.Optional[bool]
+ state: t.Optional[CGroupState]
+
+ def __post_init__(self):
+ assert pathlib.PurePosixPath(self.path).is_relative_to(CGroupPath.ROOT)
+
+ if self.type is None:
+ assert self.state is None
+ elif self.type == MountType.TMPFS:
+ assert self.writable is True
+ assert self.state is None
+ else:
+ assert self.type in (MountType.CGROUP_V1, MountType.CGROUP_V2)
+ assert self.state is not None
+
+
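The __post_init__ assertions above encode which mount declarations are coherent: a mount with no type carries no cgroup state, and a tmpfs must be writable with no state. A small sketch of the same invariants, using string stand-ins for CGroupPath.ROOT and MountType.TMPFS:

import pathlib

ROOT = '/sys/fs/cgroup'   # stands in for CGroupPath.ROOT
TMPFS = 'tmpfs'           # stands in for MountType.TMPFS

def check(path, type_, writable, state):
    # the same invariants enforced by CGroupMount.__post_init__ above
    assert pathlib.PurePosixPath(path).is_relative_to(ROOT)
    if type_ is None:
        assert state is None
    elif type_ == TMPFS:
        assert writable is True
        assert state is None

check('/sys/fs/cgroup', TMPFS, True, None)  # valid: a tmpfs masking the cgroup tree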
+def check_container_cgroup_status(args: EnvironmentConfig, config: DockerConfig, container_name: str, expected_mounts: tuple[CGroupMount, ...]) -> None:
+ """Check the running container to examine the state of the cgroup hierarchies."""
+ cmd = ['sh', '-c', 'cat /proc/1/cgroup && echo && cat /proc/1/mountinfo']
+
+ stdout = docker_exec(args, container_name, cmd, capture=True)[0]
+ cgroups_stdout, mounts_stdout = stdout.split('\n\n')
+
+ cgroups = CGroupEntry.loads(cgroups_stdout)
+ mounts = MountEntry.loads(mounts_stdout)
+
+ mounts = tuple(mount for mount in mounts if mount.path.is_relative_to(CGroupPath.ROOT))
+
+ mount_cgroups: dict[MountEntry, CGroupEntry] = {}
+ probe_paths: dict[pathlib.PurePosixPath, t.Optional[str]] = {}
+
+ for cgroup in cgroups:
+ if cgroup.subsystem:
+ mount = ([mount for mount in mounts if
+ mount.type == MountType.CGROUP_V1 and
+ mount.path.is_relative_to(cgroup.root_path) and
+ cgroup.full_path.is_relative_to(mount.path)
+ ] or [None])[-1]
+ else:
+ mount = ([mount for mount in mounts if
+ mount.type == MountType.CGROUP_V2 and
+ mount.path == cgroup.root_path
+ ] or [None])[-1]
+
+ if mount:
+ mount_cgroups[mount] = cgroup
+
+ for mount in mounts:
+ probe_paths[mount.path] = None
+
+ if (cgroup := mount_cgroups.get(mount)) and cgroup.full_path != mount.path: # child of mount.path
+ probe_paths[cgroup.full_path] = None
+
+ probe_script = read_text_file(os.path.join(ANSIBLE_TEST_TARGET_ROOT, 'setup', 'probe_cgroups.py'))
+ probe_command = [config.python.path, '-', f'{container_name}-probe'] + [str(path) for path in probe_paths]
+ probe_results = json.loads(docker_exec(args, container_name, probe_command, capture=True, data=probe_script)[0])
+
+ for path in probe_paths:
+ probe_paths[path] = probe_results[str(path)]
+
+ remaining_mounts: dict[pathlib.PurePosixPath, MountEntry] = {mount.path: mount for mount in mounts}
+ results: dict[pathlib.PurePosixPath, tuple[bool, str]] = {}
+
+ for expected_mount in expected_mounts:
+ expected_path = pathlib.PurePosixPath(expected_mount.path)
+
+ if not (actual_mount := remaining_mounts.pop(expected_path, None)):
+ results[expected_path] = (False, 'not mounted')
+ continue
+
+ actual_mount_write_error = probe_paths[actual_mount.path]
+ actual_mount_errors = []
+
+ if cgroup := mount_cgroups.get(actual_mount):
+ if expected_mount.state == CGroupState.SHADOWED:
+ actual_mount_errors.append('unexpected cgroup association')
+
+ if cgroup.root_path == cgroup.full_path and expected_mount.state == CGroupState.HOST:
+ results[cgroup.root_path.joinpath('???')] = (False, 'missing cgroup')
+
+ if cgroup.full_path == actual_mount.path:
+ if cgroup.root_path != cgroup.full_path and expected_mount.state == CGroupState.PRIVATE:
+ actual_mount_errors.append('unexpected mount')
+ else:
+ cgroup_write_error = probe_paths[cgroup.full_path]
+ cgroup_errors = []
+
+ if expected_mount.state == CGroupState.SHADOWED:
+ cgroup_errors.append('unexpected cgroup association')
+
+ if cgroup.root_path != cgroup.full_path and expected_mount.state == CGroupState.PRIVATE:
+ cgroup_errors.append('unexpected cgroup')
+
+ if cgroup_write_error:
+ cgroup_errors.append(cgroup_write_error)
+
+ if cgroup_errors:
+ results[cgroup.full_path] = (False, f'directory errors: {", ".join(cgroup_errors)}')
+ else:
+ results[cgroup.full_path] = (True, 'directory (writable)')
+ elif expected_mount.state not in (None, CGroupState.SHADOWED):
+ actual_mount_errors.append('missing cgroup association')
+
+ if actual_mount.type != expected_mount.type and expected_mount.type is not None:
+ actual_mount_errors.append(f'type not {expected_mount.type}')
+
+ if bool(actual_mount_write_error) == expected_mount.writable:
+ actual_mount_errors.append(f'{actual_mount_write_error or "writable"}')
+
+ if actual_mount_errors:
+ results[actual_mount.path] = (False, f'{actual_mount.type} errors: {", ".join(actual_mount_errors)}')
+ else:
+ results[actual_mount.path] = (True, f'{actual_mount.type} ({actual_mount_write_error or "writable"})')
+
+ for remaining_mount in remaining_mounts.values():
+ remaining_mount_write_error = probe_paths[remaining_mount.path]
+
+ results[remaining_mount.path] = (False, f'unexpected {remaining_mount.type} mount ({remaining_mount_write_error or "writable"})')
+
+ identity = get_identity(args, config, container_name)
+ messages: list[tuple[pathlib.PurePosixPath, bool, str]] = [(path, result[0], result[1]) for path, result in sorted(results.items())]
+ message = '\n'.join(f'{"PASS" if result else "FAIL"}: {path} -> {message}' for path, result, message in messages)
+
+ display.info(f'>>> Container: {identity}\n{message.rstrip()}')
+
+ if args.dev_probe_cgroups:
+ write_text_file(os.path.join(args.dev_probe_cgroups, f'{identity}.log'), message)
+
+
+def get_identity(args: EnvironmentConfig, config: DockerConfig, container_name: str):
+ """Generate and return an identity string to use when logging test results."""
+ engine = require_docker().command
+
+ try:
+ loginuid = int(read_text_file('/proc/self/loginuid'))
+ except FileNotFoundError:
+ loginuid = LOGINUID_NOT_SET
+
+ user = pwd.getpwuid(os.getuid()).pw_name
+ login_user = user if loginuid == LOGINUID_NOT_SET else pwd.getpwuid(loginuid).pw_name
+ remote = engine == 'podman' and get_podman_remote()
+
+ tags = (
+ config.name,
+ engine,
+ f'cgroup={config.cgroup.value}@{get_docker_info(args).cgroup_version}',
+ f'remote={remote}',
+ f'user={user}',
+ f'loginuid={login_user}',
+ container_name,
+ )
+
+ return '|'.join(tags)
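For reference, a fabricated example of the pipe-delimited identity string get_identity() builds; every value here is hypothetical:

tags = (
    'default',            # config.name
    'podman',             # engine
    'cgroup=v1-v2@2',     # configured cgroup support @ host cgroup version
    'remote=False',
    'user=dev',
    'loginuid=dev',
    'ansible-test-controller-abcdef',  # container_name (made up)
)
print('|'.join(tags))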
diff --git a/test/lib/ansible_test/_internal/docker_util.py b/test/lib/ansible_test/_internal/docker_util.py
index 47a3065c..77cdd4ee 100644
--- a/test/lib/ansible_test/_internal/docker_util.py
+++ b/test/lib/ansible_test/_internal/docker_util.py
@@ -1,18 +1,17 @@
"""Functions for accessing docker via the docker cli."""
from __future__ import annotations
+import dataclasses
+import enum
import json
import os
-import random
+import pathlib
+import re
import socket
import time
import urllib.parse
import typing as t
-from .io import (
- read_text_file,
-)
-
from .util import (
ApplicationError,
common_environment,
@@ -30,7 +29,17 @@ from .util_common import (
from .config import (
CommonConfig,
- EnvironmentConfig,
+)
+
+from .thread import (
+ mutex,
+ named_lock,
+)
+
+from .cgroup import (
+ CGroupEntry,
+ MountEntry,
+ MountType,
)
DOCKER_COMMANDS = [
@@ -38,10 +47,379 @@ DOCKER_COMMANDS = [
'podman',
]
+UTILITY_IMAGE = 'quay.io/ansible/ansible-test-utility-container:2.0.0'
+
# Max number of open files in a docker container.
# Passed with --ulimit option to the docker run command.
MAX_NUM_OPEN_FILES = 10240
+# The value of /proc/*/loginuid when it is not set.
+# It is a reserved UID, which is the maximum 32-bit unsigned integer value.
+# See: https://access.redhat.com/solutions/25404
+LOGINUID_NOT_SET = 4294967295
+
+
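A brief sketch of how this sentinel is consumed when probing loginuid, mirroring get_identity() in container_probe.py; the path handling matches, the printed values are fabricated:

import typing as t

LOGINUID_NOT_SET = 4294967295  # (uint32)-1, i.e. 2**32 - 1

def read_loginuid(path: str = '/proc/self/loginuid') -> t.Optional[int]:
    try:
        with open(path) as handle:
            return int(handle.read())
    except FileNotFoundError:
        return None  # kernel built without audit support

value = read_loginuid()
print('not set' if value == LOGINUID_NOT_SET else value)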
+class DockerInfo:
+ """The results of `docker info` and `docker version` for the container runtime."""
+
+ @classmethod
+ def init(cls, args: CommonConfig) -> DockerInfo:
+ """Initialize and return a DockerInfo instance."""
+ command = require_docker().command
+
+ info_stdout = docker_command(args, ['info', '--format', '{{ json . }}'], capture=True, always=True)[0]
+ info = json.loads(info_stdout)
+
+ if server_errors := info.get('ServerErrors'):
+ # This can occur when a remote docker instance is in use and the instance is not responding, such as when the system is still starting up.
+ # In that case an error such as the following may be returned:
+ # error during connect: Get "http://{hostname}:2375/v1.24/info": dial tcp {ip_address}:2375: connect: no route to host
+ raise ApplicationError('Unable to get container host information: ' + '\n'.join(server_errors))
+
+ version_stdout = docker_command(args, ['version', '--format', '{{ json . }}'], capture=True, always=True)[0]
+ version = json.loads(version_stdout)
+
+ info = DockerInfo(args, command, info, version)
+
+ return info
+
+ def __init__(self, args: CommonConfig, engine: str, info: dict[str, t.Any], version: dict[str, t.Any]) -> None:
+ self.args = args
+ self.engine = engine
+ self.info = info
+ self.version = version
+
+ @property
+ def client(self) -> dict[str, t.Any]:
+ """The client version details."""
+ client = self.version.get('Client')
+
+ if not client:
+ raise ApplicationError('Unable to get container host client information.')
+
+ return client
+
+ @property
+ def server(self) -> dict[str, t.Any]:
+ """The server version details."""
+ server = self.version.get('Server')
+
+ if not server:
+ if self.engine == 'podman':
+ # Some Podman versions always report server version info (verified with 1.8.0 and 1.9.3).
+ # Others do not unless Podman remote is being used.
+ # To provide consistency, use the client version if the server version isn't provided.
+ # See: https://github.com/containers/podman/issues/2671#issuecomment-804382934
+ return self.client
+
+ raise ApplicationError('Unable to get container host server information.')
+
+ return server
+
+ @property
+ def client_version(self) -> str:
+ """The client version."""
+ return self.client['Version']
+
+ @property
+ def server_version(self) -> str:
+ """The server version."""
+ return self.server['Version']
+
+ @property
+ def client_major_minor_version(self) -> tuple[int, int]:
+ """The client major and minor version."""
+ major, minor = self.client_version.split('.')[:2]
+ return int(major), int(minor)
+
+ @property
+ def server_major_minor_version(self) -> tuple[int, int]:
+ """The server major and minor version."""
+ major, minor = self.server_version.split('.')[:2]
+ return int(major), int(minor)
+
+ @property
+ def cgroupns_option_supported(self) -> bool:
+ """Return True if the `--cgroupns` option is supported, otherwise return False."""
+ if self.engine == 'docker':
+ # Docker added support for the `--cgroupns` option in version 20.10.
+ # Both the client and server must support the option to use it.
+ # See: https://docs.docker.com/engine/release-notes/#20100
+ return self.client_major_minor_version >= (20, 10) and self.server_major_minor_version >= (20, 10)
+
+ raise NotImplementedError(self.engine)
+
+ @property
+ def cgroup_version(self) -> int:
+ """The cgroup version of the container host."""
+ info = self.info
+ host = info.get('host')
+
+ # When the container host reports cgroup v1 it is running either cgroup v1 legacy mode or cgroup v2 hybrid mode.
+ # When the container host reports cgroup v2 it is running under cgroup v2 unified mode.
+ # See: https://github.com/containers/podman/blob/8356621249e36ed62fc7f35f12d17db9027ff076/libpod/info_linux.go#L52-L56
+ # See: https://github.com/moby/moby/blob/d082bbcc0557ec667faca81b8b33bec380b75dac/daemon/info_unix.go#L24-L27
+
+ if host:
+ return int(host['cgroupVersion'].lstrip('v')) # podman
+
+ try:
+ return int(info['CgroupVersion']) # docker
+ except KeyError:
+ pass
+
+ # Docker 20.10 (API version 1.41) added support for cgroup v2.
+ # Unfortunately the client or server is too old to report the cgroup version.
+ # If the server is old, we can infer the cgroup version.
+ # Otherwise, we'll need to fall back to detection.
+ # See: https://docs.docker.com/engine/release-notes/#20100
+ # See: https://docs.docker.com/engine/api/version-history/#v141-api-changes
+
+ if self.server_major_minor_version < (20, 10):
+ return 1 # old docker server with only cgroup v1 support
+
+ # Tell the user what versions they have and recommend they upgrade the client.
+ # Downgrading the server should also work, but we won't mention that.
+ message = (
+ f'The Docker client version is {self.client_version}. '
+ f'The Docker server version is {self.server_version}. '
+ 'Upgrade your Docker client to version 20.10 or later.'
+ )
+
+ if detect_host_properties(self.args).cgroup_v2:
+ # Unfortunately cgroup v2 was detected on the Docker server.
+ # A newer client is needed to support the `--cgroupns` option for use with cgroup v2.
+ raise ApplicationError(f'Unsupported Docker client and server combination using cgroup v2. {message}')
+
+ display.warning(f'Detected Docker server cgroup v1 using probing. {message}', unique=True)
+
+ return 1 # docker server is using cgroup v1 (or cgroup v2 hybrid)
+
+ @property
+ def docker_desktop_wsl2(self) -> bool:
+ """Return True if Docker Desktop integrated with WSL2 is detected, otherwise False."""
+ info = self.info
+
+ kernel_version = info.get('KernelVersion')
+ operating_system = info.get('OperatingSystem')
+
+ dd_wsl2 = kernel_version and kernel_version.endswith('-WSL2') and operating_system == 'Docker Desktop'
+
+ return dd_wsl2
+
+ @property
+ def description(self) -> str:
+ """Describe the container runtime."""
+ tags = dict(
+ client=self.client_version,
+ server=self.server_version,
+ cgroup=f'v{self.cgroup_version}',
+ )
+
+ labels = [self.engine] + [f'{key}={value}' for key, value in tags.items()]
+
+ if self.docker_desktop_wsl2:
+ labels.append('DD+WSL2')
+
+ return f'Container runtime: {" ".join(labels)}'
+
+
+@mutex
+def get_docker_info(args: CommonConfig) -> DockerInfo:
+ """Return info for the current container runtime. The results are cached."""
+ try:
+ return get_docker_info.info # type: ignore[attr-defined]
+ except AttributeError:
+ pass
+
+ info = DockerInfo.init(args)
+
+ display.info(info.description, verbosity=1)
+
+ get_docker_info.info = info # type: ignore[attr-defined]
+
+ return info
+
+
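get_docker_info() combines two idioms used throughout this file: a @mutex decorator to serialize callers and a function attribute as a one-shot cache. A self-contained sketch of the combination; the real mutex lives in ansible_test._internal.thread, and this stand-in only approximates it:

import functools
import threading

def mutex(func):
    """Serialize all calls to the decorated function with a single lock."""
    lock = threading.Lock()

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        with lock:
            return func(*args, **kwargs)

    return wrapper

@mutex
def get_expensive_info() -> dict:
    """Return cached info, computing it exactly once."""
    try:
        return get_expensive_info.info  # type: ignore[attr-defined]
    except AttributeError:
        pass

    info = {'computed': True}  # the expensive probe happens only here
    get_expensive_info.info = info  # type: ignore[attr-defined]
    return info

assert get_expensive_info() is get_expensive_info()  # same cached object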
+class SystemdControlGroupV1Status(enum.Enum):
+ """The state of the cgroup v1 systemd hierarchy on the container host."""
+ SUBSYSTEM_MISSING = 'The systemd cgroup subsystem was not found.'
+ FILESYSTEM_NOT_MOUNTED = 'The "/sys/fs/cgroup/systemd" filesystem is not mounted.'
+ MOUNT_TYPE_NOT_CORRECT = 'The "/sys/fs/cgroup/systemd" mount type is not correct.'
+ VALID = 'The "/sys/fs/cgroup/systemd" mount is valid.'
+
+
+@dataclasses.dataclass(frozen=True)
+class ContainerHostProperties:
+ """Container host properties detected at run time."""
+ audit_code: str
+ max_open_files: int
+ loginuid: t.Optional[int]
+ cgroup_v1: SystemdControlGroupV1Status
+ cgroup_v2: bool
+
+
+@mutex
+def detect_host_properties(args: CommonConfig) -> ContainerHostProperties:
+ """
+ Detect and return properties of the container host.
+
+ The information collected is:
+
+ - The errno result from attempting to query the container host's audit status.
+ - The max number of open files supported by the container host to run containers.
+ This value may be capped to the maximum value used by ansible-test.
+ If the value is below the desired limit, a warning is displayed.
+ - The loginuid used by the container host to run containers, or None if the audit subsystem is unavailable.
+ - The cgroup subsystems registered with the Linux kernel.
+ - The mounts visible within a container.
+ - The status of the systemd cgroup v1 hierarchy.
+
+ This information is collected together to reduce the number of container runs to probe the container host.
+ """
+ try:
+ return detect_host_properties.properties # type: ignore[attr-defined]
+ except AttributeError:
+ pass
+
+ single_line_commands = (
+ 'audit-status',
+ 'cat /proc/sys/fs/nr_open',
+ 'ulimit -Hn',
+ '(cat /proc/1/loginuid; echo)',
+ )
+
+ multi_line_commands = (
+ ' && '.join(single_line_commands),
+ 'cat /proc/1/cgroup',
+ 'cat /proc/1/mountinfo',
+ )
+
+ options = ['--volume', '/sys/fs/cgroup:/probe:ro']
+ cmd = ['sh', '-c', ' && echo "-" && '.join(multi_line_commands)]
+
+ stdout = run_utility_container(args, f'ansible-test-probe-{args.session_name}', cmd, options)[0]
+
+ if args.explain:
+ return ContainerHostProperties(
+ audit_code='???',
+ max_open_files=MAX_NUM_OPEN_FILES,
+ loginuid=LOGINUID_NOT_SET,
+ cgroup_v1=SystemdControlGroupV1Status.VALID,
+ cgroup_v2=False,
+ )
+
+ blocks = stdout.split('\n-\n')
+
+ values = blocks[0].split('\n')
+
+ audit_parts = values[0].split(' ', 1)
+ audit_status = int(audit_parts[0])
+ audit_code = audit_parts[1]
+
+ system_limit = int(values[1])
+ hard_limit = int(values[2])
+ loginuid = int(values[3]) if values[3] else None
+
+ cgroups = CGroupEntry.loads(blocks[1])
+ mounts = MountEntry.loads(blocks[2])
+
+ if hard_limit < MAX_NUM_OPEN_FILES and hard_limit < system_limit and require_docker().command == 'docker':
+ # Podman will use the highest possible limits, up to its default of 1M.
+ # See: https://github.com/containers/podman/blob/009afb50b308548eb129bc68e654db6c6ad82e7a/pkg/specgen/generate/oci.go#L39-L58
+ # Docker limits are less predictable. They could be the system limit or the user's soft limit.
+ # If Docker is running as root it should be able to use the system limit.
+ # When Docker reports a limit below the preferred value and the system limit, attempt to use the preferred value, up to the system limit.
+ options = ['--ulimit', f'nofile={min(system_limit, MAX_NUM_OPEN_FILES)}']
+ cmd = ['sh', '-c', 'ulimit -Hn']
+
+ try:
+ stdout = run_utility_container(args, f'ansible-test-ulimit-{args.session_name}', cmd, options)[0]
+ except SubprocessError as ex:
+ display.warning(str(ex))
+ else:
+ hard_limit = int(stdout)
+
+ # Check the audit error code from attempting to query the container host's audit status.
+ #
+ # The following error codes are known to occur:
+ #
+ # EPERM - Operation not permitted
+ # This occurs when the root user runs a container but lacks the AUDIT_WRITE capability.
+ # This will cause patched versions of OpenSSH to disconnect after a login succeeds.
+ # See: https://src.fedoraproject.org/rpms/openssh/blob/f36/f/openssh-7.6p1-audit.patch
+ #
+ # EBADF - Bad file number
+ # This occurs when the host doesn't support the audit system (the open_audit call fails).
+ # This allows SSH logins to succeed despite the failure.
+ # See: https://github.com/Distrotech/libaudit/blob/4fc64f79c2a7f36e3ab7b943ce33ab5b013a7782/lib/netlink.c#L204-L209
+ #
+ # ECONNREFUSED - Connection refused
+ # This occurs when a non-root user runs a container without the AUDIT_WRITE capability.
+ # When sending an audit message, libaudit ignores this error condition.
+ # This allows SSH logins to succeed despite the failure.
+ # See: https://github.com/Distrotech/libaudit/blob/4fc64f79c2a7f36e3ab7b943ce33ab5b013a7782/lib/deprecated.c#L48-L52
+
+ subsystems = set(cgroup.subsystem for cgroup in cgroups)
+ mount_types = {mount.path: mount.type for mount in mounts}
+
+ if 'systemd' not in subsystems:
+ cgroup_v1 = SystemdControlGroupV1Status.SUBSYSTEM_MISSING
+ elif not (mount_type := mount_types.get(pathlib.PurePosixPath('/probe/systemd'))):
+ cgroup_v1 = SystemdControlGroupV1Status.FILESYSTEM_NOT_MOUNTED
+ elif mount_type != MountType.CGROUP_V1:
+ cgroup_v1 = SystemdControlGroupV1Status.MOUNT_TYPE_NOT_CORRECT
+ else:
+ cgroup_v1 = SystemdControlGroupV1Status.VALID
+
+ cgroup_v2 = mount_types.get(pathlib.PurePosixPath('/probe')) == MountType.CGROUP_V2
+
+ display.info(f'Container host audit status: {audit_code} ({audit_status})', verbosity=1)
+ display.info(f'Container host max open files: {hard_limit}', verbosity=1)
+ display.info(f'Container loginuid: {loginuid if loginuid is not None else "unavailable"}'
+ f'{" (not set)" if loginuid == LOGINUID_NOT_SET else ""}', verbosity=1)
+
+ if hard_limit < MAX_NUM_OPEN_FILES:
+ display.warning(f'Unable to set container max open files to {MAX_NUM_OPEN_FILES}. Using container host limit of {hard_limit} instead.')
+ else:
+ hard_limit = MAX_NUM_OPEN_FILES
+
+ properties = ContainerHostProperties(
+ # The errno (audit_status) is intentionally not exposed here, as it can vary across systems and architectures.
+ # Instead, the symbolic name (audit_code) is used, which is resolved inside the container which generated the error.
+ # See: https://man7.org/linux/man-pages/man3/errno.3.html
+ audit_code=audit_code,
+ max_open_files=hard_limit,
+ loginuid=loginuid,
+ cgroup_v1=cgroup_v1,
+ cgroup_v2=cgroup_v2,
+ )
+
+ detect_host_properties.properties = properties # type: ignore[attr-defined]
+
+ return properties
+
+
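The probe above joins several commands with `&& echo "-" &&` so that a single container run yields every value, then splits the output back apart. A runnable sketch of that parsing with fabricated stdout; the audit line format (an errno followed by its symbolic code) is inferred from the parsing code:

stdout = (
    '111 ECONNREFUSED\n'   # audit-status output: errno and symbolic code (fabricated)
    '1048576\n'            # cat /proc/sys/fs/nr_open
    '524288\n'             # ulimit -Hn
    '4294967295\n'         # /proc/1/loginuid (not set)
    '-\n'
    '0::/\n'               # /proc/1/cgroup block, fed to CGroupEntry.loads()
    '-\n'
    '35 24 0:30 / /probe rw - cgroup2 cgroup2 rw\n'  # mountinfo block, fed to MountEntry.loads()
)

blocks = stdout.split('\n-\n')
values = blocks[0].split('\n')

audit_status, _, audit_code = values[0].partition(' ')
system_limit = int(values[1])
hard_limit = int(values[2])
loginuid = int(values[3]) if values[3] else None

print(int(audit_status), audit_code, system_limit, hard_limit, loginuid)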
+def run_utility_container(
+ args: CommonConfig,
+ name: str,
+ cmd: list[str],
+ options: list[str],
+ data: t.Optional[str] = None,
+) -> tuple[t.Optional[str], t.Optional[str]]:
+ """Run the specified command using the ansible-test utility container, returning stdout and stderr."""
+ options = options + [
+ '--name', name,
+ '--rm',
+ ]
+
+ if data:
+ options.append('-i')
+
+ docker_pull(args, UTILITY_IMAGE)
+
+ return docker_run(args, UTILITY_IMAGE, options, cmd, data)
+
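Tracing run_utility_container() through docker_run() and docker_command(), the composed command line looks roughly like the following; the engine, session name, and cmd are fabricated:

UTILITY_IMAGE = 'quay.io/ansible/ansible-test-utility-container:2.0.0'

name = 'ansible-test-probe-abcdef'  # fabricated session name
options = ['--volume', '/sys/fs/cgroup:/probe:ro', '--name', name, '--rm']
cmd = ['sh', '-c', 'cat /proc/1/cgroup']

argv = ['docker', 'run'] + options + [UTILITY_IMAGE] + cmd  # podman would add --remote when needed
print(' '.join(argv))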
class DockerCommand:
"""Details about the available docker command."""
@@ -62,7 +440,7 @@ class DockerCommand:
executable = find_executable(command, required=False)
if executable:
- version = raw_command([command, '-v'], capture=True)[0].strip()
+ version = raw_command([command, '-v'], env=docker_environment(), capture=True)[0].strip()
if command == 'docker' and 'podman' in version:
continue # avoid detecting podman as docker
@@ -141,7 +519,7 @@ def get_podman_default_hostname() -> t.Optional[str]:
"""
hostname: t.Optional[str] = None
try:
- stdout = raw_command(['podman', 'system', 'connection', 'list', '--format=json'], capture=True)[0]
+ stdout = raw_command(['podman', 'system', 'connection', 'list', '--format=json'], env=docker_environment(), capture=True)[0]
except SubprocessError:
stdout = '[]'
@@ -160,7 +538,8 @@ def get_podman_default_hostname() -> t.Optional[str]:
@cache
-def _get_podman_remote() -> t.Optional[str]:
+def get_podman_remote() -> t.Optional[str]:
+ """Return the remote podman hostname, if any, otherwise return None."""
# URL value resolution precedence:
# - command line value
# - environment variable CONTAINER_HOST
@@ -185,7 +564,7 @@ def _get_podman_remote() -> t.Optional[str]:
@cache
def get_podman_hostname() -> str:
"""Return the hostname of the Podman service."""
- hostname = _get_podman_remote()
+ hostname = get_podman_remote()
if not hostname:
hostname = 'localhost'
@@ -197,164 +576,141 @@ def get_podman_hostname() -> str:
@cache
def get_docker_container_id() -> t.Optional[str]:
"""Return the current container ID if running in a container, otherwise return None."""
- path = '/proc/self/cpuset'
+ mountinfo_path = pathlib.Path('/proc/self/mountinfo')
container_id = None
-
- if os.path.exists(path):
- # File content varies based on the environment:
- # No Container: /
- # Docker: /docker/c86f3732b5ba3d28bb83b6e14af767ab96abbc52de31313dcb1176a62d91a507
- # Azure Pipelines (Docker): /azpl_job/0f2edfed602dd6ec9f2e42c867f4d5ee640ebf4c058e6d3196d4393bb8fd0891
- # Podman: /../../../../../..
- contents = read_text_file(path)
-
- cgroup_path, cgroup_name = os.path.split(contents.strip())
-
- if cgroup_path in ('/docker', '/azpl_job'):
- container_id = cgroup_name
+ engine = None
+
+ if mountinfo_path.is_file():
+ # NOTE: This method of detecting the container engine and container ID relies on implementation details of each container engine.
+ # Although the implementation details have remained unchanged for some time, there is no guarantee they will continue to work.
+ # There have been proposals to create a standard mechanism for this, but none is currently available.
+ # See: https://github.com/opencontainers/runtime-spec/issues/1105
+
+ mounts = MountEntry.loads(mountinfo_path.read_text())
+
+ for mount in mounts:
+ if str(mount.path) == '/etc/hostname':
+ # Podman generates /etc/hostname in the makePlatformBindMounts function.
+ # That function ends up using ContainerRunDirectory to generate a path like: {prefix}/{container_id}/userdata/hostname
+ # NOTE: The {prefix} portion of the path can vary, so should not be relied upon.
+ # See: https://github.com/containers/podman/blob/480c7fbf5361f3bd8c1ed81fe4b9910c5c73b186/libpod/container_internal_linux.go#L660-L664
+ # See: https://github.com/containers/podman/blob/480c7fbf5361f3bd8c1ed81fe4b9910c5c73b186/vendor/github.com/containers/storage/store.go#L3133
+ # This behavior has existed for ~5 years and was present in Podman version 0.2.
+ # See: https://github.com/containers/podman/pull/248
+ if match := re.search('/(?P<id>[0-9a-f]{64})/userdata/hostname$', str(mount.root)):
+ container_id = match.group('id')
+ engine = 'Podman'
+ break
+
+ # Docker generates /etc/hostname in the BuildHostnameFile function.
+ # That function ends up using the containerRoot function to generate a path like: {prefix}/{container_id}/hostname
+ # NOTE: The {prefix} portion of the path can vary, so should not be relied upon.
+ # See: https://github.com/moby/moby/blob/cd8a090e6755bee0bdd54ac8a894b15881787097/container/container_unix.go#L58
+ # See: https://github.com/moby/moby/blob/92e954a2f05998dc05773b6c64bbe23b188cb3a0/daemon/container.go#L86
+ # This behavior has existed for at least ~7 years and was present in Docker version 1.0.1.
+ # See: https://github.com/moby/moby/blob/v1.0.1/daemon/container.go#L351
+ # See: https://github.com/moby/moby/blob/v1.0.1/daemon/daemon.go#L133
+ if match := re.search('/(?P<id>[0-9a-f]{64})/hostname$', str(mount.root)):
+ container_id = match.group('id')
+ engine = 'Docker'
+ break
if container_id:
- display.info('Detected execution in Docker container: %s' % container_id, verbosity=1)
+ display.info(f'Detected execution in {engine} container ID: {container_id}', verbosity=1)
return container_id
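A quick, runnable check of the two mount-root patterns matched above, with fabricated 64-hex container IDs and path prefixes (the real prefixes vary, as the comments note):

import re

podman_root = '/var/lib/containers/storage/overlay-containers/' + 'a' * 64 + '/userdata/hostname'
docker_root = '/var/lib/docker/containers/' + 'b' * 64 + '/hostname'

# The Podman pattern requires the trailing /userdata/hostname segment,
# so the Docker pattern cannot accidentally claim a Podman path.
assert re.search('/(?P<id>[0-9a-f]{64})/userdata/hostname$', podman_root).group('id') == 'a' * 64
assert re.search('/(?P<id>[0-9a-f]{64})/hostname$', docker_root).group('id') == 'b' * 64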
-def get_docker_preferred_network_name(args: EnvironmentConfig) -> str:
- """
- Return the preferred network name for use with Docker. The selection logic is:
- - the network selected by the user with `--docker-network`
- - the network of the currently running docker container (if any)
- - the default docker network (returns None)
- """
- try:
- return get_docker_preferred_network_name.network # type: ignore[attr-defined]
- except AttributeError:
- pass
-
- network = None
-
- if args.docker_network:
- network = args.docker_network
- else:
- current_container_id = get_docker_container_id()
-
- if current_container_id:
- # Make sure any additional containers we launch use the same network as the current container we're running in.
- # This is needed when ansible-test is running in a container that is not connected to Docker's default network.
- container = docker_inspect(args, current_container_id, always=True)
- network = container.get_network_name()
-
- get_docker_preferred_network_name.network = network # type: ignore[attr-defined]
-
- return network
-
-
-def is_docker_user_defined_network(network: str) -> bool:
- """Return True if the network being used is a user-defined network."""
- return bool(network) and network != 'bridge'
-
-
-def docker_pull(args: EnvironmentConfig, image: str) -> None:
+def docker_pull(args: CommonConfig, image: str) -> None:
"""
Pull the specified image if it is not available.
Images without a tag or digest will not be pulled.
Retries up to 10 times if the pull fails.
+ A warning will be shown for any image with volumes defined.
+ Images will be pulled only once.
+ Concurrent pulls for the same image will block until the first completes.
"""
+ with named_lock(f'docker_pull:{image}') as first:
+ if first:
+ __docker_pull(args, image)
+
+
+def __docker_pull(args: CommonConfig, image: str) -> None:
+ """Internal implementation for docker_pull. Do not call directly."""
if '@' not in image and ':' not in image:
display.info('Skipping pull of image without tag or digest: %s' % image, verbosity=2)
- return
-
- if docker_image_exists(args, image):
+ inspect = docker_image_inspect(args, image)
+ elif inspect := docker_image_inspect(args, image, always=True):
display.info('Skipping pull of existing image: %s' % image, verbosity=2)
- return
+ else:
+ for _iteration in range(1, 10):
+ try:
+ docker_command(args, ['pull', image], capture=False)
- for _iteration in range(1, 10):
- try:
- docker_command(args, ['pull', image], capture=False)
- return
- except SubprocessError:
- display.warning('Failed to pull docker image "%s". Waiting a few seconds before trying again.' % image)
- time.sleep(3)
+ if (inspect := docker_image_inspect(args, image)) or args.explain:
+ break
- raise ApplicationError('Failed to pull docker image "%s".' % image)
+ display.warning(f'Image "{image}" not found after pull completed. Waiting a few seconds before trying again.')
+ except SubprocessError:
+ display.warning(f'Failed to pull container image "{image}". Waiting a few seconds before trying again.')
+ time.sleep(3)
+ else:
+ raise ApplicationError(f'Failed to pull container image "{image}".')
+ if inspect and inspect.volumes:
+ display.warning(f'Image "{image}" contains {len(inspect.volumes)} volume(s): {", ".join(sorted(inspect.volumes))}\n'
+ 'This may result in leaking anonymous volumes. It may also prevent the image from working on some hosts or container engines.\n'
+ 'The image should be rebuilt without the use of the VOLUME instruction.',
+ unique=True)
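docker_pull() relies on named_lock() yielding True only for the first caller of a given name while blocking the rest. A standalone sketch of that idiom; the real helper is in ansible_test._internal.thread, and this stand-in only mirrors the behavior the docstring above describes:

import contextlib
import threading
import typing as t

_locks: dict[str, threading.Lock] = {}
_locks_lock = threading.Lock()

@contextlib.contextmanager
def named_lock(name: str) -> t.Iterator[bool]:
    """Yield True for the first caller of a name; block and yield False for the rest."""
    with _locks_lock:
        if not (lock := _locks.get(name)):
            _locks[name] = lock = threading.Lock()
            first = True
        else:
            first = False

    with lock:
        yield first

def pull(image: str) -> None:
    with named_lock(f'docker_pull:{image}') as first:
        if first:
            print(f'pulling {image} once')

pull('example:latest')
pull('example:latest')  # second call skips the pull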
-def docker_cp_to(args: EnvironmentConfig, container_id: str, src: str, dst: str) -> None:
+
+def docker_cp_to(args: CommonConfig, container_id: str, src: str, dst: str) -> None:
"""Copy a file to the specified container."""
docker_command(args, ['cp', src, '%s:%s' % (container_id, dst)], capture=True)
-def docker_run(
- args: EnvironmentConfig,
+def docker_create(
+ args: CommonConfig,
image: str,
- name: str,
- options: t.Optional[list[str]],
- cmd: t.Optional[list[str]] = None,
- create_only: bool = False,
-) -> str:
- """Run a container using the given docker image."""
- options = list(options or [])
- options.extend(['--name', name])
-
- if not cmd:
- cmd = []
-
- if create_only:
- command = 'create'
- else:
- command = 'run'
-
- network = get_docker_preferred_network_name(args)
-
- if is_docker_user_defined_network(network):
- # Only when the network is not the default bridge network.
- options.extend(['--network', network])
-
- options.extend(['--ulimit', 'nofile=%s' % MAX_NUM_OPEN_FILES])
-
- for _iteration in range(1, 3):
- try:
- stdout = docker_command(args, [command] + options + [image] + cmd, capture=True)[0]
-
- if args.explain:
- return ''.join(random.choice('0123456789abcdef') for _iteration in range(64))
-
- return stdout.strip()
- except SubprocessError as ex:
- display.error(ex.message)
- display.warning('Failed to run docker image "%s". Waiting a few seconds before trying again.' % image)
- docker_rm(args, name) # podman doesn't remove containers after create if run fails
- time.sleep(3)
-
- raise ApplicationError('Failed to run docker image "%s".' % image)
+ options: list[str],
+    cmd: t.Optional[list[str]] = None,
+) -> tuple[t.Optional[str], t.Optional[str]]:
+    """Create a container using the given docker image."""
+    return docker_command(args, ['create'] + options + [image] + (cmd or []), capture=True)
-def docker_start(args: EnvironmentConfig, container_id: str, options: t.Optional[list[str]] = None) -> tuple[t.Optional[str], t.Optional[str]]:
- """
- Start a docker container by name or ID
- """
- if not options:
- options = []
+def docker_run(
+ args: CommonConfig,
+ image: str,
+ options: list[str],
+    cmd: t.Optional[list[str]] = None,
+    data: t.Optional[str] = None,
+) -> tuple[t.Optional[str], t.Optional[str]]:
+    """Run a container using the given docker image."""
+    return docker_command(args, ['run'] + options + [image] + (cmd or []), data=data, capture=True)
- for _iteration in range(1, 3):
- try:
- return docker_command(args, ['start'] + options + [container_id], capture=True)
- except SubprocessError as ex:
- display.error(ex.message)
- display.warning('Failed to start docker container "%s". Waiting a few seconds before trying again.' % container_id)
- time.sleep(3)
- raise ApplicationError('Failed to run docker container "%s".' % container_id)
+def docker_start(
+ args: CommonConfig,
+ container_id: str,
+ options: list[str],
+) -> tuple[t.Optional[str], t.Optional[str]]:
+ """Start a container by name or ID."""
+ return docker_command(args, ['start'] + options + [container_id], capture=True)
-def docker_rm(args: EnvironmentConfig, container_id: str) -> None:
+def docker_rm(args: CommonConfig, container_id: str) -> None:
"""Remove the specified container."""
try:
- docker_command(args, ['rm', '-f', container_id], capture=True)
+ # Stop the container with SIGKILL immediately, then remove the container.
+ # Podman supports the `--time` option on `rm`, but only since version 4.0.0.
+ # Docker does not support the `--time` option on `rm`.
+ docker_command(args, ['stop', '--time', '0', container_id], capture=True)
+ docker_command(args, ['rm', container_id], capture=True)
except SubprocessError as ex:
- if 'no such container' in ex.stderr:
- pass # podman does not handle this gracefully, exits 1
- else:
+ # Both Podman and Docker report an error if the container does not exist.
+ # The error messages contain the same "no such container" string, differing only in capitalization.
+ if 'no such container' not in ex.stderr.lower():
raise ex
@@ -372,7 +728,7 @@ class ContainerNotFoundError(DockerError):
class DockerInspect:
"""The results of `docker inspect` for a single container."""
- def __init__(self, args: EnvironmentConfig, inspection: dict[str, t.Any]) -> None:
+ def __init__(self, args: CommonConfig, inspection: dict[str, t.Any]) -> None:
self.args = args
self.inspection = inspection
@@ -416,6 +772,14 @@ class DockerInspect:
return self.state['Running']
@property
+ def pid(self) -> int:
+ """Return the PID of the init process."""
+ if self.args.explain:
+ return 0
+
+ return self.state['Pid']
+
+ @property
def env(self) -> list[str]:
"""Return a list of the environment variables used to create the container."""
return self.config['Env']
@@ -454,27 +818,8 @@ class DockerInspect:
return networks[0]
- def get_ip_address(self) -> t.Optional[str]:
- """Return the IP address of the container for the preferred docker network."""
- if self.networks:
- network_name = get_docker_preferred_network_name(self.args)
- if not network_name:
- # Sort networks and use the first available.
- # This assumes all containers will have access to the same networks.
- network_name = sorted(self.networks.keys()).pop(0)
-
- ipaddress = self.networks[network_name]['IPAddress']
- else:
- ipaddress = self.network_settings['IPAddress']
-
- if not ipaddress:
- return None
-
- return ipaddress
-
-
-def docker_inspect(args: EnvironmentConfig, identifier: str, always: bool = False) -> DockerInspect:
+def docker_inspect(args: CommonConfig, identifier: str, always: bool = False) -> DockerInspect:
"""
Return the results of `docker container inspect` for the specified container.
Raises a ContainerNotFoundError if the container was not found.
@@ -495,23 +840,110 @@ def docker_inspect(args: EnvironmentConfig, identifier: str, always: bool = Fals
raise ContainerNotFoundError(identifier)
-def docker_network_disconnect(args: EnvironmentConfig, container_id: str, network: str) -> None:
+def docker_network_disconnect(args: CommonConfig, container_id: str, network: str) -> None:
"""Disconnect the specified docker container from the given network."""
docker_command(args, ['network', 'disconnect', network, container_id], capture=True)
-def docker_image_exists(args: EnvironmentConfig, image: str) -> bool:
- """Return True if the image exists, otherwise False."""
+class DockerImageInspect:
+ """The results of `docker image inspect` for a single image."""
+ def __init__(self, args: CommonConfig, inspection: dict[str, t.Any]) -> None:
+ self.args = args
+ self.inspection = inspection
+
+ # primary properties
+
+ @property
+ def config(self) -> dict[str, t.Any]:
+ """Return a dictionary of the image config."""
+ return self.inspection['Config']
+
+ # nested properties
+
+ @property
+ def volumes(self) -> dict[str, t.Any]:
+ """Return a dictionary of the image volumes."""
+ return self.config.get('Volumes') or {}
+
+ @property
+ def cmd(self) -> list[str]:
+ """The command to run when the container starts."""
+ return self.config['Cmd']
+
+
+@mutex
+def docker_image_inspect(args: CommonConfig, image: str, always: bool = False) -> t.Optional[DockerImageInspect]:
+ """
+ Return the results of `docker image inspect` for the specified image or None if the image does not exist.
+ """
+ inspect_cache: dict[str, DockerImageInspect]
+
+ try:
+ inspect_cache = docker_image_inspect.cache # type: ignore[attr-defined]
+ except AttributeError:
+ inspect_cache = docker_image_inspect.cache = {} # type: ignore[attr-defined]
+
+ if inspect_result := inspect_cache.get(image):
+ return inspect_result
+
+ try:
+ stdout = docker_command(args, ['image', 'inspect', image], capture=True, always=always)[0]
+ except SubprocessError:
+ stdout = '[]'
+
+ if args.explain and not always:
+ items = []
+ else:
+ items = json.loads(stdout)
+
+ if len(items) > 1:
+ raise ApplicationError(f'Inspection of image "{image}" resulted in {len(items)} items:\n{json.dumps(items, indent=4)}')
+
+ if len(items) == 1:
+ inspect_result = DockerImageInspect(args, items[0])
+ inspect_cache[image] = inspect_result
+ return inspect_result
+
+ return None
+
+
+class DockerNetworkInspect:
+ """The results of `docker network inspect` for a single network."""
+ def __init__(self, args: CommonConfig, inspection: dict[str, t.Any]) -> None:
+ self.args = args
+ self.inspection = inspection
+
+
+def docker_network_inspect(args: CommonConfig, network: str, always: bool = False) -> t.Optional[DockerNetworkInspect]:
+ """
+ Return the results of `docker network inspect` for the specified network or None if the network does not exist.
+ """
try:
- docker_command(args, ['image', 'inspect', image], capture=True)
+ stdout = docker_command(args, ['network', 'inspect', network], capture=True, always=always)[0]
except SubprocessError:
- return False
+ stdout = '[]'
+
+ if args.explain and not always:
+ items = []
+ else:
+ items = json.loads(stdout)
- return True
+ if len(items) == 1:
+ return DockerNetworkInspect(args, items[0])
+
+ return None
+
+
+def docker_logs(args: CommonConfig, container_id: str) -> None:
+ """Display logs for the specified container. If an error occurs, it is displayed rather than raising an exception."""
+ try:
+ docker_command(args, ['logs', container_id], capture=False)
+ except SubprocessError as ex:
+ display.error(str(ex))
def docker_exec(
- args: EnvironmentConfig,
+ args: CommonConfig,
container_id: str,
cmd: list[str],
capture: bool,
@@ -533,18 +965,6 @@ def docker_exec(
output_stream=output_stream, data=data)
-def docker_info(args: CommonConfig) -> dict[str, t.Any]:
- """Return a dictionary containing details from the `docker info` command."""
- stdout, _dummy = docker_command(args, ['info', '--format', '{{json .}}'], capture=True, always=True)
- return json.loads(stdout)
-
-
-def docker_version(args: CommonConfig) -> dict[str, t.Any]:
- """Return a dictionary containing details from the `docker version` command."""
- stdout, _dummy = docker_command(args, ['version', '--format', '{{json .}}'], capture=True, always=True)
- return json.loads(stdout)
-
-
def docker_command(
args: CommonConfig,
cmd: list[str],
@@ -560,7 +980,7 @@ def docker_command(
env = docker_environment()
command = [require_docker().command]
- if command[0] == 'podman' and _get_podman_remote():
+ if command[0] == 'podman' and get_podman_remote():
command.append('--remote')
return run_command(args, command + cmd, env=env, capture=capture, stdin=stdin, stdout=stdout, interactive=interactive, always=always,
@@ -570,5 +990,16 @@ def docker_command(
def docker_environment() -> dict[str, str]:
"""Return a dictionary of docker related environment variables found in the current environment."""
env = common_environment()
- env.update(dict((key, os.environ[key]) for key in os.environ if key.startswith('DOCKER_') or key.startswith('CONTAINER_')))
+
+ var_names = {
+ 'XDG_RUNTIME_DIR', # podman
+ }
+
+ var_prefixes = {
+ 'CONTAINER_', # podman remote
+ 'DOCKER_', # docker
+ }
+
+ env.update({name: value for name, value in os.environ.items() if name in var_names or any(name.startswith(prefix) for prefix in var_prefixes)})
+
return env
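A self-contained check of the filtering rules above, run against a fabricated environment instead of os.environ:

environ = {
    'XDG_RUNTIME_DIR': '/run/user/1000',                          # podman
    'CONTAINER_HOST': 'ssh://user@host/run/podman/podman.sock',   # podman remote
    'DOCKER_HOST': 'tcp://127.0.0.1:2375',                        # docker
    'HOME': '/home/dev',                                          # filtered out
}

var_names = {'XDG_RUNTIME_DIR'}
var_prefixes = {'CONTAINER_', 'DOCKER_'}

env = {name: value for name, value in environ.items()
       if name in var_names or any(name.startswith(prefix) for prefix in var_prefixes)}

assert 'HOME' not in env and len(env) == 3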
diff --git a/test/lib/ansible_test/_internal/host_configs.py b/test/lib/ansible_test/_internal/host_configs.py
index 54818acb..d7671c7f 100644
--- a/test/lib/ansible_test/_internal/host_configs.py
+++ b/test/lib/ansible_test/_internal/host_configs.py
@@ -18,6 +18,8 @@ from .io import (
)
from .completion import (
+ AuditMode,
+ CGroupVersion,
CompletionConfig,
docker_completion,
DockerCompletionConfig,
@@ -282,6 +284,8 @@ class DockerConfig(ControllerHostConfig, PosixConfig):
memory: t.Optional[int] = None
privileged: t.Optional[bool] = None
seccomp: t.Optional[str] = None
+ cgroup: t.Optional[CGroupVersion] = None
+ audit: t.Optional[AuditMode] = None
def get_defaults(self, context: HostContext) -> DockerCompletionConfig:
"""Return the default settings."""
@@ -313,6 +317,12 @@ class DockerConfig(ControllerHostConfig, PosixConfig):
if self.seccomp is None:
self.seccomp = defaults.seccomp
+ if self.cgroup is None:
+ self.cgroup = defaults.cgroup_enum
+
+ if self.audit is None:
+ self.audit = defaults.audit_enum
+
if self.privileged is None:
self.privileged = False
diff --git a/test/lib/ansible_test/_internal/host_profiles.py b/test/lib/ansible_test/_internal/host_profiles.py
index b97152e2..6575e7c1 100644
--- a/test/lib/ansible_test/_internal/host_profiles.py
+++ b/test/lib/ansible_test/_internal/host_profiles.py
@@ -4,11 +4,13 @@ from __future__ import annotations
import abc
import dataclasses
import os
+import shlex
import tempfile
import time
import typing as t
from .io import (
+ read_text_file,
write_text_file,
)
@@ -52,16 +54,29 @@ from .util import (
sanitize_host_name,
sorted_versions,
InternalError,
+ HostConnectionError,
+ ANSIBLE_TEST_TARGET_ROOT,
)
from .util_common import (
+ get_docs_url,
intercept_python,
)
from .docker_util import (
docker_exec,
+ docker_image_inspect,
+ docker_logs,
+ docker_pull,
docker_rm,
get_docker_hostname,
+ require_docker,
+ get_docker_info,
+ detect_host_properties,
+ run_utility_container,
+ SystemdControlGroupV1Status,
+ LOGINUID_NOT_SET,
+ UTILITY_IMAGE,
)
from .bootstrap import (
@@ -103,12 +118,66 @@ from .become import (
Sudo,
)
+from .completion import (
+ AuditMode,
+ CGroupVersion,
+)
+
+from .dev.container_probe import (
+ CGroupMount,
+ CGroupPath,
+ CGroupState,
+ MountType,
+ check_container_cgroup_status,
+)
+
TControllerHostConfig = t.TypeVar('TControllerHostConfig', bound=ControllerHostConfig)
THostConfig = t.TypeVar('THostConfig', bound=HostConfig)
TPosixConfig = t.TypeVar('TPosixConfig', bound=PosixConfig)
TRemoteConfig = t.TypeVar('TRemoteConfig', bound=RemoteConfig)
+class ControlGroupError(ApplicationError):
+ """Raised when the container host does not have the necessary cgroup support to run a container."""
+ def __init__(self, args: CommonConfig, reason: str) -> None:
+ engine = require_docker().command
+ dd_wsl2 = get_docker_info(args).docker_desktop_wsl2
+
+ message = f'''
+{reason}
+
+Run the following commands as root on the container host to resolve this issue:
+
+ mkdir /sys/fs/cgroup/systemd
+ mount cgroup -t cgroup /sys/fs/cgroup/systemd -o none,name=systemd,xattr
+ chown -R {{user}}:{{group}} /sys/fs/cgroup/systemd # only when rootless
+
+NOTE: These changes must be applied each time the container host is rebooted.
+'''.strip()
+
+ podman_message = '''
+ If rootless Podman is already running [1], you may need to stop it before
+ containers are able to use the new mount point.
+
+[1] Check for 'podman' and 'catatonit' processes.
+'''
+
+ dd_wsl_message = f'''
+ When using Docker Desktop with WSL2, additional configuration [1] is required.
+
+[1] {get_docs_url("https://docs.ansible.com/ansible-core/devel/dev_guide/testing_running_locally.html#docker-desktop-with-wsl2")}
+'''
+
+ if engine == 'podman':
+ message += podman_message
+ elif dd_wsl2:
+ message += dd_wsl_message
+
+ message = message.strip()
+
+ super().__init__(message)
+
+
@dataclasses.dataclass(frozen=True)
class Inventory:
"""Simple representation of an Ansible inventory."""
@@ -179,6 +248,9 @@ class HostProfile(t.Generic[THostConfig], metaclass=abc.ABCMeta):
def setup(self) -> None:
"""Perform out-of-band setup before delegation."""
+ def on_target_failure(self) -> None:
+ """Executed during failure handling if this profile is a target."""
+
def deprovision(self) -> None:
"""Deprovision the host after delegation has completed."""
@@ -331,6 +403,17 @@ class ControllerProfile(SshTargetHostProfile[ControllerConfig], PosixProfile[Con
class DockerProfile(ControllerHostProfile[DockerConfig], SshTargetHostProfile[DockerConfig]):
"""Host profile for a docker instance."""
+
+ MARKER = 'ansible-test-marker'
+
+ @dataclasses.dataclass(frozen=True)
+ class InitConfig:
+ """Configuration details required to run the container init."""
+ options: list[str]
+ command: str
+ command_privileged: bool
+ expected_mounts: tuple[CGroupMount, ...]
+
@property
def container_name(self) -> t.Optional[str]:
"""Return the stored container name, if any, otherwise None."""
@@ -341,24 +424,519 @@ class DockerProfile(ControllerHostProfile[DockerConfig], SshTargetHostProfile[Do
"""Store the given container name."""
self.state['container_name'] = value
+ @property
+ def cgroup_path(self) -> t.Optional[str]:
+ """Return the path to the cgroup v1 systemd hierarchy, if any, otherwise None."""
+ return self.state.get('cgroup_path')
+
+ @cgroup_path.setter
+ def cgroup_path(self, value: str) -> None:
+ """Store the path to the cgroup v1 systemd hierarchy."""
+ self.state['cgroup_path'] = value
+
+ @property
+ def label(self) -> str:
+ """Label to apply to resources related to this profile."""
+ return f'{"controller" if self.controller else "target"}-{self.args.session_name}'
+
def provision(self) -> None:
"""Provision the host before delegation."""
+ init_probe = self.args.dev_probe_cgroups is not None
+ init_config = self.get_init_config()
+
container = run_support_container(
args=self.args,
context='__test_hosts__',
image=self.config.image,
- name=f'ansible-test-{"controller" if self.controller else "target"}-{self.args.session_name}',
+ name=f'ansible-test-{self.label}',
ports=[22],
publish_ports=not self.controller, # connections to the controller over SSH are not required
- options=self.get_docker_run_options(),
+ options=init_config.options,
cleanup=CleanupMode.NO,
+ cmd=self.build_init_command(init_config, init_probe),
)
if not container:
+ if self.args.prime_containers:
+ if init_config.command_privileged or init_probe:
+ docker_pull(self.args, UTILITY_IMAGE)
+
return
self.container_name = container.name
+ try:
+ options = ['--pid', 'host', '--privileged']
+
+ if init_config.command and init_config.command_privileged:
+ init_command = init_config.command
+
+ if not init_probe:
+ init_command += f' && {shlex.join(self.wake_command)}'
+
+ cmd = ['nsenter', '-t', str(container.details.container.pid), '-m', '-p', 'sh', '-c', init_command]
+ run_utility_container(self.args, f'ansible-test-init-{self.label}', cmd, options)
+
+ if init_probe:
+ check_container_cgroup_status(self.args, self.config, self.container_name, init_config.expected_mounts)
+
+ cmd = ['nsenter', '-t', str(container.details.container.pid), '-m', '-p'] + self.wake_command
+ run_utility_container(self.args, f'ansible-test-wake-{self.label}', cmd, options)
+ except SubprocessError:
+ display.info(f'Checking container "{self.container_name}" logs...')
+ docker_logs(self.args, self.container_name)
+
+ raise
+
+ def get_init_config(self) -> InitConfig:
+ """Return init config for running under the current container engine."""
+ self.check_cgroup_requirements()
+
+ engine = require_docker().command
+ init_config = getattr(self, f'get_{engine}_init_config')()
+
+ return init_config
+
+ def get_podman_init_config(self) -> InitConfig:
+ """Return init config for running under Podman."""
+ options = self.get_common_run_options()
+ command: t.Optional[str] = None
+ command_privileged = False
+ expected_mounts: tuple[CGroupMount, ...]
+
+ cgroup_version = get_docker_info(self.args).cgroup_version
+
+ # Without AUDIT_WRITE the following errors may appear in the system logs of a container after attempting to log in using SSH:
+ #
+ # fatal: linux_audit_write_entry failed: Operation not permitted
+ #
+ # This occurs when running containers as root when the container host provides audit support, but the user lacks the AUDIT_WRITE capability.
+ # The AUDIT_WRITE capability is provided by docker by default, but not podman.
+ # See: https://github.com/moby/moby/pull/7179
+ #
+ # OpenSSH Portable requires AUDIT_WRITE when logging in with a TTY if the Linux audit feature was compiled in.
+ # Containers with the feature enabled will require the AUDIT_WRITE capability when EPERM is returned while accessing the audit system.
+ # See: https://github.com/openssh/openssh-portable/blob/2dc328023f60212cd29504fc05d849133ae47355/audit-linux.c#L90
+ # See: https://github.com/openssh/openssh-portable/blob/715c892f0a5295b391ae92c26ef4d6a86ea96e8e/loginrec.c#L476-L478
+ #
+ # Some containers will be running a patched version of OpenSSH which blocks logins when EPERM is received while using the audit system.
+ # These containers will require the AUDIT_WRITE capability when EPERM is returned while accessing the audit system.
+ # See: https://src.fedoraproject.org/rpms/openssh/blob/f36/f/openssh-7.6p1-audit.patch
+ #
+ # Since only some containers carry the patch or enable the Linux audit feature in OpenSSH, this capability is enabled on a per-container basis.
+ # No warning is provided when adding this capability, since there's not really anything the user can do about it.
+ if self.config.audit == AuditMode.REQUIRED and detect_host_properties(self.args).audit_code == 'EPERM':
+ options.extend(('--cap-add', 'AUDIT_WRITE'))
+
+ # Without AUDIT_CONTROL the following errors may appear in the system logs of a container after attempting to log in using SSH:
+ #
+ # pam_loginuid(sshd:session): Error writing /proc/self/loginuid: Operation not permitted
+ # pam_loginuid(sshd:session): set_loginuid failed
+ #
+ # Containers configured to use the pam_loginuid module will encounter this error. If the module is required, logins will fail.
+ # Since most containers will have this configuration, the code to handle this issue is applied to all containers.
+ #
+ # This occurs when the loginuid is set on the container host and doesn't match the user on the container host which is running the container.
+ # Container hosts which do not use systemd are likely to leave the loginuid unset and thus be unaffected.
+ # The most common source of a mismatch is the use of sudo to run ansible-test, which changes the uid but cannot change the loginuid.
+ # This condition typically occurs only under podman, since the loginuid is inherited from the current user.
+ # See: https://github.com/containers/podman/issues/13012#issuecomment-1034049725
+ #
+ # This condition is detected by querying the loginuid of a container running on the container host.
+ # When it occurs, a warning is displayed and the AUDIT_CONTROL capability is added to containers to work around the issue.
+ # The warning serves as notice to the user that their usage of ansible-test is responsible for the additional capability requirement.
+ if (loginuid := detect_host_properties(self.args).loginuid) not in (0, LOGINUID_NOT_SET, None):
+ display.warning(f'Running containers with capability AUDIT_CONTROL since the container loginuid ({loginuid}) is incorrect. '
+ 'This is most likely due to use of sudo to run ansible-test when loginuid is already set.', unique=True)
+
+ options.extend(('--cap-add', 'AUDIT_CONTROL'))
+
+ if self.config.cgroup == CGroupVersion.NONE:
+ # Containers which do not require cgroup do not use systemd.
+
+ options.extend((
+ # Disabling systemd support in Podman will allow these containers to work on hosts without systemd.
+ # Without this, running a container on a host without systemd results in errors such as (from crun):
+ # Error: crun: error stat'ing file `/sys/fs/cgroup/systemd`: No such file or directory:
+ # A similar error occurs when using runc:
+ # OCI runtime attempted to invoke a command that was not found
+ '--systemd', 'false',
+ # A private cgroup namespace limits what is visible in /proc/*/cgroup.
+ '--cgroupns', 'private',
+ # Mounting a tmpfs overrides the cgroup mount(s) that would otherwise be provided by Podman.
+ # This helps provide a consistent container environment across various container host configurations.
+ '--tmpfs', '/sys/fs/cgroup',
+ ))
+
+ expected_mounts = (
+ CGroupMount(path=CGroupPath.ROOT, type=MountType.TMPFS, writable=True, state=None),
+ )
+ elif self.config.cgroup in (CGroupVersion.V1_V2, CGroupVersion.V1_ONLY) and cgroup_version == 1:
+ # Podman hosts providing cgroup v1 will automatically bind mount the systemd hierarchy read-write in the container.
+ # They will also create a dedicated cgroup v1 systemd hierarchy for the container.
+ # On hosts with systemd this path is: /sys/fs/cgroup/systemd/libpod_parent/libpod-{container_id}/
+ # On hosts without systemd this path is: /sys/fs/cgroup/systemd/{container_id}/
+
+ options.extend((
+ # Force Podman to enable systemd support since a command may be used later (to support pre-init diagnostics).
+ '--systemd', 'always',
+ # The host namespace must be used to permit the container to access the cgroup v1 systemd hierarchy created by Podman.
+ '--cgroupns', 'host',
+ # Mask the host cgroup tmpfs mount to avoid exposing the host cgroup v1 hierarchies (or cgroup v2 hybrid) to the container.
+                # Podman will provide a cgroup v1 systemd hierarchy on top of this.
+ '--tmpfs', '/sys/fs/cgroup',
+ ))
+
+ self.check_systemd_cgroup_v1(options) # podman
+
+ expected_mounts = (
+ CGroupMount(path=CGroupPath.ROOT, type=MountType.TMPFS, writable=True, state=None),
+ # The mount point can be writable or not.
+ # The reason for the variation is not known.
+ CGroupMount(path=CGroupPath.SYSTEMD, type=MountType.CGROUP_V1, writable=None, state=CGroupState.HOST),
+ # The filesystem type can be tmpfs or devtmpfs.
+ # The reason for the variation is not known.
+ CGroupMount(path=CGroupPath.SYSTEMD_RELEASE_AGENT, type=None, writable=False, state=None),
+ )
+ elif self.config.cgroup in (CGroupVersion.V1_V2, CGroupVersion.V2_ONLY) and cgroup_version == 2:
+ # Podman hosts providing cgroup v2 will give each container a read-write cgroup mount.
+
+ options.extend((
+ # Force Podman to enable systemd support since a command may be used later (to support pre-init diagnostics).
+ '--systemd', 'always',
+ # A private cgroup namespace is used to avoid exposing the host cgroup to the container.
+ '--cgroupns', 'private',
+ ))
+
+ expected_mounts = (
+ CGroupMount(path=CGroupPath.ROOT, type=MountType.CGROUP_V2, writable=True, state=CGroupState.PRIVATE),
+ )
+ elif self.config.cgroup == CGroupVersion.V1_ONLY and cgroup_version == 2:
+ # Containers which require cgroup v1 need explicit volume mounts on container hosts not providing that version.
+ # We must put the container PID 1 into the cgroup v1 systemd hierarchy we create.
+ cgroup_path = self.create_systemd_cgroup_v1() # podman
+ command = f'echo 1 > {cgroup_path}/cgroup.procs'
+
+ options.extend((
+ # Force Podman to enable systemd support since a command is being provided.
+ '--systemd', 'always',
+ # A private cgroup namespace is required. Using the host cgroup namespace results in errors such as the following (from crun):
+ # Error: OCI runtime error: mount `/sys/fs/cgroup` to '/sys/fs/cgroup': Invalid argument
+ # A similar error occurs when using runc:
+ # Error: OCI runtime error: runc create failed: unable to start container process: error during container init:
+ # error mounting "/sys/fs/cgroup" to rootfs at "/sys/fs/cgroup": mount /sys/fs/cgroup:/sys/fs/cgroup (via /proc/self/fd/7), flags: 0x1000:
+ # invalid argument
+ '--cgroupns', 'private',
+ # Unlike Docker, Podman ignores a /sys/fs/cgroup tmpfs mount, instead exposing a cgroup v2 mount.
+ # The exposed volume will be read-write, but the container will have its own private namespace.
+ # Provide a read-only cgroup v1 systemd hierarchy under which the dedicated ansible-test cgroup will be mounted read-write.
+ # Without this systemd will fail while attempting to mount the cgroup v1 systemd hierarchy.
+ # Podman doesn't support using a tmpfs for this. Attempting to do so results in an error (from crun):
+ # Error: OCI runtime error: read: Invalid argument
+ # A similar error occurs when using runc:
+ # Error: OCI runtime error: runc create failed: unable to start container process: error during container init:
+ # error mounting "tmpfs" to rootfs at "/sys/fs/cgroup/systemd": tmpcopyup: failed to copy /sys/fs/cgroup/systemd to /proc/self/fd/7
+ # (/tmp/runctop3876247619/runctmpdir1460907418): read /proc/self/fd/7/cgroup.kill: invalid argument
+ '--volume', '/sys/fs/cgroup/systemd:/sys/fs/cgroup/systemd:ro',
+ # Provide the container access to the cgroup v1 systemd hierarchy created by ansible-test.
+ '--volume', f'{cgroup_path}:{cgroup_path}:rw',
+ ))
+
+ expected_mounts = (
+ CGroupMount(path=CGroupPath.ROOT, type=MountType.CGROUP_V2, writable=True, state=CGroupState.PRIVATE),
+ CGroupMount(path=CGroupPath.SYSTEMD, type=MountType.CGROUP_V1, writable=False, state=CGroupState.SHADOWED),
+ CGroupMount(path=cgroup_path, type=MountType.CGROUP_V1, writable=True, state=CGroupState.HOST),
+ )
+ else:
+ raise InternalError(f'Unhandled cgroup configuration: {self.config.cgroup} on cgroup v{cgroup_version}.')
+
+ return self.InitConfig(
+ options=options,
+ command=command,
+ command_privileged=command_privileged,
+ expected_mounts=expected_mounts,
+ )
+
+ def get_docker_init_config(self) -> InitConfig:
+ """Return init config for running under Docker."""
+ options = self.get_common_run_options()
+ command: t.Optional[str] = None
+ command_privileged = False
+ expected_mounts: tuple[CGroupMount, ...]
+
+ cgroup_version = get_docker_info(self.args).cgroup_version
+
+ if self.config.cgroup == CGroupVersion.NONE:
+ # Containers which do not require cgroup do not use systemd.
+
+ if get_docker_info(self.args).cgroupns_option_supported:
+ # Use the `--cgroupns` option if it is supported.
+ # Older servers which do not support the option use the host cgroup namespace.
+ # Older clients which do not support the option cause newer servers to use the host cgroup namespace (cgroup v1 only).
+ # See: https://github.com/moby/moby/blob/master/api/server/router/container/container_routes.go#L512-L517
+ # If the host cgroup namespace is used, cgroup information will be visible, but the cgroup mounts will be unavailable due to the tmpfs below.
+ options.extend((
+ # A private cgroup namespace limits what is visible in /proc/*/cgroup.
+ '--cgroupns', 'private',
+ ))
+
+ options.extend((
+ # Mounting a tmpfs overrides the cgroup mount(s) that would otherwise be provided by Docker.
+ # This helps provide a consistent container environment across various container host configurations.
+ '--tmpfs', '/sys/fs/cgroup',
+ ))
+
+ expected_mounts = (
+ CGroupMount(path=CGroupPath.ROOT, type=MountType.TMPFS, writable=True, state=None),
+ )
+ elif self.config.cgroup in (CGroupVersion.V1_V2, CGroupVersion.V1_ONLY) and cgroup_version == 1:
+ # Docker hosts providing cgroup v1 will automatically bind mount the systemd hierarchy read-only in the container.
+ # They will also create a dedicated cgroup v1 systemd hierarchy for the container.
+ # The cgroup v1 systemd hierarchy path is: /sys/fs/cgroup/systemd/{container_id}/
+
+ if get_docker_info(self.args).cgroupns_option_supported:
+ # Use the `--cgroupns` option if it is supported.
+ # Older servers which do not support the option use the host cgroup namespace.
+ # Older clients which do not support the option cause newer servers to use the host cgroup namespace (cgroup v1 only).
+ # See: https://github.com/moby/moby/blob/master/api/server/router/container/container_routes.go#L512-L517
+ options.extend((
+ # The host cgroup namespace must be used.
+ # Otherwise, /proc/1/cgroup will report "/" for the cgroup path, which is incorrect.
+ # See: https://github.com/systemd/systemd/issues/19245#issuecomment-815954506
+ # It is set here to avoid relying on the current Docker configuration.
+ '--cgroupns', 'host',
+ ))
+
+ options.extend((
+ # Mask the host cgroup tmpfs mount to avoid exposing the host cgroup v1 hierarchies (or cgroup v2 hybrid) to the container.
+ '--tmpfs', '/sys/fs/cgroup',
+ # A cgroup v1 systemd hierarchy needs to be mounted read-write over the read-only one provided by Docker.
+ # Alternatives were tested, but were unusable due to various issues:
+ # - Attempting to remount the existing mount point read-write will result in a "mount point is busy" error.
+ # - Adding the entire "/sys/fs/cgroup" mount will expose hierarchies other than systemd.
+ # If the host is a cgroup v2 hybrid host it would also expose the /sys/fs/cgroup/unified/ hierarchy read-write.
+ # On older systems, such as an Ubuntu 18.04 host, a dedicated v2 cgroup would not be used, exposing the host cgroups to the container.
+ '--volume', '/sys/fs/cgroup/systemd:/sys/fs/cgroup/systemd:rw',
+ ))
+
+ self.check_systemd_cgroup_v1(options) # docker
+
+ expected_mounts = (
+ CGroupMount(path=CGroupPath.ROOT, type=MountType.TMPFS, writable=True, state=None),
+ CGroupMount(path=CGroupPath.SYSTEMD, type=MountType.CGROUP_V1, writable=True, state=CGroupState.HOST),
+ )
+ elif self.config.cgroup in (CGroupVersion.V1_V2, CGroupVersion.V2_ONLY) and cgroup_version == 2:
+ # Docker hosts providing cgroup v2 will give each container a read-only cgroup mount.
+ # It must be remounted read-write before systemd starts.
+ # This must be done in a privileged container, otherwise a "permission denied" error can occur.
+ command = 'mount -o remount,rw /sys/fs/cgroup/'
+ command_privileged = True
+
+ options.extend((
+ # A private cgroup namespace is used to avoid exposing the host cgroup to the container.
+ # This matches the behavior in Podman 1.7.0 and later, which select cgroupns 'host' mode for cgroup v1 and 'private' mode for cgroup v2.
+ # See: https://github.com/containers/podman/pull/4374
+ # See: https://github.com/containers/podman/blob/main/RELEASE_NOTES.md#170
+ '--cgroupns', 'private',
+ ))
+
+ expected_mounts = (
+ CGroupMount(path=CGroupPath.ROOT, type=MountType.CGROUP_V2, writable=True, state=CGroupState.PRIVATE),
+ )
+ elif self.config.cgroup == CGroupVersion.V1_ONLY and cgroup_version == 2:
+ # Containers which require cgroup v1 need explicit volume mounts on container hosts not providing that version.
+ # We must put the container PID 1 into the cgroup v1 systemd hierarchy we create.
+ cgroup_path = self.create_systemd_cgroup_v1() # docker
+ command = f'echo 1 > {cgroup_path}/cgroup.procs'
+
+ options.extend((
+ # A private cgroup namespace is used since no access to the host cgroup namespace is required.
+ # This matches the configuration used for running cgroup v1 containers under Podman.
+ '--cgroupns', 'private',
+ # Provide a read-write tmpfs filesystem to support additional cgroup mount points.
+ # Without this Docker will provide a read-only cgroup2 mount instead.
+ '--tmpfs', '/sys/fs/cgroup',
+ # Provide a read-write tmpfs filesystem to simulate a systemd cgroup v1 hierarchy.
+ # Without this systemd will fail while attempting to mount the cgroup v1 systemd hierarchy.
+ '--tmpfs', '/sys/fs/cgroup/systemd',
+ # Provide the container access to the cgroup v1 systemd hierarchy created by ansible-test.
+ '--volume', f'{cgroup_path}:{cgroup_path}:rw',
+ ))
+
+ expected_mounts = (
+ CGroupMount(path=CGroupPath.ROOT, type=MountType.TMPFS, writable=True, state=None),
+ CGroupMount(path=CGroupPath.SYSTEMD, type=MountType.TMPFS, writable=True, state=None),
+ CGroupMount(path=cgroup_path, type=MountType.CGROUP_V1, writable=True, state=CGroupState.HOST),
+ )
+ else:
+ raise InternalError(f'Unhandled cgroup configuration: {self.config.cgroup} on cgroup v{cgroup_version}.')
+
+ return self.InitConfig(
+ options=options,
+ command=command,
+ command_privileged=command_privileged,
+ expected_mounts=expected_mounts,
+ )
+
+ def build_init_command(self, init_config: InitConfig, sleep: bool) -> t.Optional[list[str]]:
+ """
+ Build and return the command to start in the container.
+ Returns None if the default command for the container should be used.
+
+ The sleep duration below was selected to:
+
+ - Allow enough time to perform necessary operations in the container before waking it.
+ - Make the delay obvious if the wake command doesn't run or succeed.
+ - Avoid hanging indefinitely or for an unreasonably long time.
+
+ NOTE: The container must have a POSIX-compliant default shell "sh" with a non-builtin "sleep" command.
+ """
+ command = ''
+
+ if init_config.command and not init_config.command_privileged:
+ command += f'{init_config.command} && '
+
+ if sleep or init_config.command_privileged:
+ command += 'sleep 60 ; '
+
+ if not command:
+ return None
+
+ docker_pull(self.args, self.config.image)
+ inspect = docker_image_inspect(self.args, self.config.image)
+
+ command += f'exec {shlex.join(inspect.cmd)}'
+
+ return ['sh', '-c', command]
+
+ @property
+ def wake_command(self) -> list[str]:
+ """
+ The command used to wake the container from sleep.
+ This will be run inside our utility container, so the command used does not need to be present in the container being woken up.
+ """
+ return ['pkill', 'sleep']
+
+ def check_systemd_cgroup_v1(self, options: list[str]) -> None:
+ """Check the cgroup v1 systemd hierarchy to verify it is writeable for our container."""
+ probe_script = (read_text_file(os.path.join(ANSIBLE_TEST_TARGET_ROOT, 'setup', 'check_systemd_cgroup_v1.sh'))
+ .replace('@MARKER@', self.MARKER)
+ .replace('@LABEL@', self.label))
+
+ cmd = ['sh']
+
+ try:
+ run_utility_container(self.args, f'ansible-test-cgroup-check-{self.label}', cmd, options, data=probe_script)
+ except SubprocessError as ex:
+ if error := self.extract_error(ex.stderr):
+ raise ControlGroupError(self.args, 'Unable to create a v1 cgroup within the systemd hierarchy.\n'
+ f'Reason: {error}') from ex # cgroup probe failed
+
+ raise
+
+ def create_systemd_cgroup_v1(self) -> str:
+ """Create a unique ansible-test cgroup in the v1 systemd hierarchy and return its path."""
+ self.cgroup_path = f'/sys/fs/cgroup/systemd/ansible-test-{self.label}'
+
+ # Privileged mode is required to create the cgroup directories on some hosts, such as Fedora 36 and RHEL 9.0.
+ # The mkdir command will fail with "Permission denied" otherwise.
+ options = ['--volume', '/sys/fs/cgroup/systemd:/sys/fs/cgroup/systemd:rw', '--privileged']
+ cmd = ['sh', '-c', f'>&2 echo {shlex.quote(self.MARKER)} && mkdir {shlex.quote(self.cgroup_path)}']
+
+ try:
+ run_utility_container(self.args, f'ansible-test-cgroup-create-{self.label}', cmd, options)
+ except SubprocessError as ex:
+ if error := self.extract_error(ex.stderr):
+ raise ControlGroupError(self.args, 'Unable to create a v1 cgroup within the systemd hierarchy.\n'
+ f'Reason: {error}') from ex # cgroup create permission denied
+
+ raise
+
+ return self.cgroup_path
+
+ @property
+ def delete_systemd_cgroup_v1_command(self) -> list[str]:
+ """The command used to remove the previously created ansible-test cgroup in the v1 systemd hierarchy."""
+ return ['find', self.cgroup_path, '-type', 'd', '-delete']
+
+ def delete_systemd_cgroup_v1(self) -> None:
+ """Delete a previously created ansible-test cgroup in the v1 systemd hierarchy."""
+ # Privileged mode is required to remove the cgroup directories on some hosts, such as Fedora 36 and RHEL 9.0.
+ # The BusyBox find utility will report "Permission denied" otherwise, although it still exits with a status code of 0.
+ options = ['--volume', '/sys/fs/cgroup/systemd:/sys/fs/cgroup/systemd:rw', '--privileged']
+ cmd = ['sh', '-c', f'>&2 echo {shlex.quote(self.MARKER)} && {shlex.join(self.delete_systemd_cgroup_v1_command)}']
+
+ try:
+ run_utility_container(self.args, f'ansible-test-cgroup-delete-{self.label}', cmd, options)
+ except SubprocessError as ex:
+ if error := self.extract_error(ex.stderr):
+ if error.endswith(': No such file or directory'):
+ return
+
+ display.error(str(ex))
+
+ def extract_error(self, value: str) -> t.Optional[str]:
+ """
+ Extract the ansible-test portion of the error message from the given value and return it.
+ Returns None if no ansible-test marker was found.
+ """
+ lines = value.strip().splitlines()
+
+ try:
+ idx = lines.index(self.MARKER)
+ except ValueError:
+ return None
+
+ lines = lines[idx + 1:]
+ message = '\n'.join(lines)
+
+ return message
+
+ def check_cgroup_requirements(self) -> None:
+ """Check cgroup requirements for the container."""
+ cgroup_version = get_docker_info(self.args).cgroup_version
+
+ if cgroup_version not in (1, 2):
+ raise ApplicationError(f'The container host provides cgroup v{cgroup_version}, but only versions 1 and 2 are supported.')
+
+ # Stop early for containers which require cgroup v2 when the container host does not provide it.
+ # None of the containers included with ansible-test currently use this configuration.
+ # Support for v2-only was added in preparation for the eventual removal of cgroup v1 support from systemd after EOY 2023.
+ # See: https://github.com/systemd/systemd/pull/24086
+ if self.config.cgroup == CGroupVersion.V2_ONLY and cgroup_version != 2:
+ raise ApplicationError(f'Container {self.config.name} requires cgroup v2 but the container host provides cgroup v{cgroup_version}.')
+
+ # Containers which use old versions of systemd (earlier than version 226) require cgroup v1 support.
+ # If the host is a cgroup v2 (unified) host, changes must be made to how the container is run.
+ #
+ # See: https://github.com/systemd/systemd/blob/main/NEWS
+ # Under the "CHANGES WITH 226" section:
+ # > systemd now optionally supports the new Linux kernel "unified" control group hierarchy.
+ #
+ # NOTE: The container host must have the cgroup v1 mount already present.
+ # If the container is run rootless, the user it runs under must have permissions to the mount.
+ #
+ # The following commands can be used to make the mount available:
+ #
+ # mkdir /sys/fs/cgroup/systemd
+ # mount cgroup -t cgroup /sys/fs/cgroup/systemd -o none,name=systemd,xattr
+ # chown -R {user}:{group} /sys/fs/cgroup/systemd # only when rootless
+ #
+ # See: https://github.com/containers/crun/blob/main/crun.1.md#runocisystemdforce_cgroup_v1path
+ if self.config.cgroup == CGroupVersion.V1_ONLY or (self.config.cgroup != CGroupVersion.NONE and get_docker_info(self.args).cgroup_version == 1):
+ if (cgroup_v1 := detect_host_properties(self.args).cgroup_v1) != SystemdControlGroupV1Status.VALID:
+ if self.config.cgroup == CGroupVersion.V1_ONLY:
+ if get_docker_info(self.args).cgroup_version == 2:
+ reason = f'Container {self.config.name} requires cgroup v1, but the container host only provides cgroup v2.'
+ else:
+ reason = f'Container {self.config.name} requires cgroup v1, but the container host does not appear to be running systemd.'
+ else:
+ reason = 'The container host provides cgroup v1, but does not appear to be running systemd.'
+
+ reason += f'\n{cgroup_v1.value}'
+
+ raise ControlGroupError(self.args, reason) # cgroup probe reported invalid state
+
def setup(self) -> None:
"""Perform out-of-band setup before delegation."""
bootstrapper = BootstrapDocker(
@@ -370,32 +948,62 @@ class DockerProfile(ControllerHostProfile[DockerConfig], SshTargetHostProfile[Do
setup_sh = bootstrapper.get_script()
shell = setup_sh.splitlines()[0][2:]
- docker_exec(self.args, self.container_name, [shell], data=setup_sh, capture=False)
+ try:
+ docker_exec(self.args, self.container_name, [shell], data=setup_sh, capture=False)
+ except SubprocessError:
+ display.info(f'Checking container "{self.container_name}" logs...')
+ docker_logs(self.args, self.container_name)
+ raise
def deprovision(self) -> None:
"""Deprovision the host after delegation has completed."""
- if not self.container_name:
- return # provision was never called or did not succeed, so there is no container to remove
-
- if self.args.docker_terminate == TerminateMode.ALWAYS or (self.args.docker_terminate == TerminateMode.SUCCESS and self.args.success):
- docker_rm(self.args, self.container_name)
+ container_exists = False
+
+ if self.container_name:
+ if self.args.docker_terminate == TerminateMode.ALWAYS or (self.args.docker_terminate == TerminateMode.SUCCESS and self.args.success):
+ docker_rm(self.args, self.container_name)
+ else:
+ container_exists = True
+
+ if self.cgroup_path:
+ if container_exists:
+ display.notice(f'Remember to run `{require_docker().command} rm -f {self.container_name}` when finished testing. '
+ f'Then run `{shlex.join(self.delete_systemd_cgroup_v1_command)}` on the container host.')
+ else:
+ self.delete_systemd_cgroup_v1()
+ elif container_exists:
+ display.notice(f'Remember to run `{require_docker().command} rm -f {self.container_name}` when finished testing.')
def wait(self) -> None:
"""Wait for the instance to be ready. Executed before delegation for the controller and after delegation for targets."""
if not self.controller:
con = self.get_controller_target_connections()[0]
+ last_error = ''
- for dummy in range(1, 60):
+ for dummy in range(1, 10):
try:
con.run(['id'], capture=True)
except SubprocessError as ex:
if 'Permission denied' in ex.message:
raise
+ last_error = str(ex)
time.sleep(1)
else:
return
+ display.info('Checking SSH debug output...')
+ display.info(last_error)
+
+ if not self.args.delegate and not self.args.host_path:
+ def callback() -> None:
+ """Callback to run during error display."""
+ self.on_target_failure() # when the controller is not delegated, report failures immediately
+ else:
+ callback = None
+
+ raise HostConnectionError(f'Timeout waiting for {self.config.name} container {self.container_name}.', callback)
+
def get_controller_target_connections(self) -> list[SshConnection]:
"""Return SSH connection(s) for accessing the host as a target from the controller."""
containers = get_container_database(self.args)
@@ -411,6 +1019,10 @@ class DockerProfile(ControllerHostProfile[DockerConfig], SshTargetHostProfile[Do
port=port,
identity_file=SshKey(self.args).key,
python_interpreter=self.python.path,
+ # CentOS 6 uses OpenSSH 5.3, making it incompatible with the default configuration of OpenSSH 8.8 and later clients.
+ # Since only CentOS 6 is affected, and it is only supported by ansible-core 2.12, support for RSA SHA-1 is simply hard-coded here.
+ # A substring is used to allow custom containers to work, not just the one provided with ansible-test.
+ enable_rsa_sha1='centos6' in self.config.image,
)
return [SshConnection(self.args, settings)]
@@ -423,12 +1035,33 @@ class DockerProfile(ControllerHostProfile[DockerConfig], SshTargetHostProfile[Do
"""Return the working directory for the host."""
return '/root'
- def get_docker_run_options(self) -> list[str]:
+ def on_target_failure(self) -> None:
+ """Executed during failure handling if this profile is a target."""
+ display.info(f'Checking container "{self.container_name}" logs...')
+
+ try:
+ docker_logs(self.args, self.container_name)
+ except SubprocessError as ex:
+ display.error(str(ex))
+
+ if self.config.cgroup != CGroupVersion.NONE:
+ # Containers with cgroup support are assumed to be running systemd.
+ display.info(f'Checking container "{self.container_name}" systemd logs...')
+
+ try:
+ docker_exec(self.args, self.container_name, ['journalctl'], capture=False)
+ except SubprocessError as ex:
+ display.error(str(ex))
+
+ display.error(f'Connection to container "{self.container_name}" failed. See logs and original error above.')
+
+ def get_common_run_options(self) -> list[str]:
"""Return a list of options needed to run the container."""
options = [
- '--volume', '/sys/fs/cgroup:/sys/fs/cgroup:ro',
- f'--privileged={str(self.config.privileged).lower()}',
- # These temporary mount points need to be created at run time.
+ # These temporary mount points need to be created at run time when using Docker.
+ # They are automatically provided by Podman, but will be overridden by VOLUME instructions for the container, if they exist.
+ # If supporting containers with VOLUME instructions is not desired, these options could be limited to use with Docker.
+ # See: https://github.com/containers/podman/pull/1318
# Previously they were handled by the VOLUME instruction during container image creation.
# However, that approach creates anonymous volumes when running the container, which are then left behind after the container is deleted.
# These options eliminate the need for the VOLUME instruction, and override it if they are present.
@@ -439,6 +1072,9 @@ class DockerProfile(ControllerHostProfile[DockerConfig], SshTargetHostProfile[Do
'--tmpfs', '/run/lock', # some systemd containers require a separate tmpfs here, such as Ubuntu 20.04 and Ubuntu 22.04
]
+ if self.config.privileged:
+ options.append('--privileged')
+
if self.config.memory:
options.extend([
f'--memory={self.config.memory}',
@@ -478,6 +1114,12 @@ class NetworkRemoteProfile(RemoteProfile[NetworkRemoteConfig]):
ansible_port=connection.port,
ansible_user=connection.username,
ansible_ssh_private_key_file=core_ci.ssh_key.key,
+ # VyOS 1.1.8 uses OpenSSH 5.5, making it incompatible with RSA SHA-256/512 used by Paramiko 2.9 and later.
+ # IOS CSR 1000V uses an ancient SSH server, making it incompatible with RSA SHA-256/512 used by Paramiko 2.9 and later.
+ # That means all network platforms currently offered by ansible-core-ci require support for RSA SHA-1, so it is simply hard-coded here.
+ # NOTE: This option only exists in ansible-core 2.14 and later. For older ansible-core versions, use of Paramiko 2.8.x or earlier is required.
+ # See: https://github.com/ansible/ansible/pull/78789
+ # See: https://github.com/ansible/ansible/pull/78842
ansible_paramiko_use_rsa_sha2_algorithms='no',
ansible_network_os=f'{self.config.collection}.{self.config.platform}' if self.config.collection else self.config.platform,
)
@@ -509,7 +1151,7 @@ class NetworkRemoteProfile(RemoteProfile[NetworkRemoteConfig]):
else:
return
- raise ApplicationError(f'Timeout waiting for {self.config.name} instance {core_ci.instance_id}.')
+ raise HostConnectionError(f'Timeout waiting for {self.config.name} instance {core_ci.instance_id}.')
def get_controller_target_connections(self) -> list[SshConnection]:
"""Return SSH connection(s) for accessing the host as a target from the controller."""
@@ -521,6 +1163,10 @@ class NetworkRemoteProfile(RemoteProfile[NetworkRemoteConfig]):
port=core_ci.connection.port,
user=core_ci.connection.username,
identity_file=core_ci.ssh_key.key,
+ # VyOS 1.1.8 uses OpenSSH 5.5, making it incompatible with the default configuration of OpenSSH 8.8 and later clients.
+ # IOS CSR 1000V uses an ancient SSH server, making it incompatible with the default configuration of OpenSSH 8.8 and later clients.
+ # That means all network platforms currently offered by ansible-core-ci require support for RSA SHA-1, so it is simply hard-coded here.
+ enable_rsa_sha1=True,
)
return [SshConnection(self.args, settings)]
@@ -599,12 +1245,12 @@ class PosixRemoteProfile(ControllerHostProfile[PosixRemoteConfig], RemoteProfile
try:
return self.get_working_directory()
except SubprocessError as ex:
- if 'Permission denied' in ex.message:
- raise
-
+ # No "Permission denied" check is performed here.
+ # Unlike containers, with remote instances, user configuration isn't guaranteed to have been completed before SSH connections are attempted.
+ display.warning(str(ex))
time.sleep(10)
- raise ApplicationError(f'Timeout waiting for {self.config.name} instance {core_ci.instance_id}.')
+ raise HostConnectionError(f'Timeout waiting for {self.config.name} instance {core_ci.instance_id}.')
def get_controller_target_connections(self) -> list[SshConnection]:
"""Return SSH connection(s) for accessing the host as a target from the controller."""
@@ -740,7 +1386,7 @@ class WindowsRemoteProfile(RemoteProfile[WindowsRemoteConfig]):
else:
return
- raise ApplicationError(f'Timeout waiting for {self.config.name} instance {core_ci.instance_id}.')
+ raise HostConnectionError(f'Timeout waiting for {self.config.name} instance {core_ci.instance_id}.')
def get_controller_target_connections(self) -> list[SshConnection]:
"""Return SSH connection(s) for accessing the host as a target from the controller."""
diff --git a/test/lib/ansible_test/_internal/inventory.py b/test/lib/ansible_test/_internal/inventory.py
index 9cfd4394..6abf9ede 100644
--- a/test/lib/ansible_test/_internal/inventory.py
+++ b/test/lib/ansible_test/_internal/inventory.py
@@ -25,6 +25,10 @@ from .host_profiles import (
WindowsRemoteProfile,
)
+from .ssh import (
+ ssh_options_to_str,
+)
+
def create_controller_inventory(args: EnvironmentConfig, path: str, controller_host: ControllerHostProfile) -> None:
"""Create and return inventory for use in controller-only integration tests."""
@@ -149,6 +153,7 @@ def create_posix_inventory(args: EnvironmentConfig, path: str, target_hosts: lis
ansible_port=ssh.settings.port,
ansible_user=ssh.settings.user,
ansible_ssh_private_key_file=ssh.settings.identity_file,
+ ansible_ssh_extra_args=ssh_options_to_str(ssh.settings.options),
)
if ssh.become:
diff --git a/test/lib/ansible_test/_internal/provisioning.py b/test/lib/ansible_test/_internal/provisioning.py
index 42de521d..8f914c2a 100644
--- a/test/lib/ansible_test/_internal/provisioning.py
+++ b/test/lib/ansible_test/_internal/provisioning.py
@@ -19,6 +19,7 @@ from .config import (
from .util import (
ApplicationError,
+ HostConnectionError,
display,
open_binary_file,
verify_sys_executable,
@@ -99,7 +100,7 @@ def prepare_profiles(
args: TEnvironmentConfig,
targets_use_pypi: bool = False,
skip_setup: bool = False,
- requirements: t.Optional[c.Callable[[TEnvironmentConfig, HostState], None]] = None,
+ requirements: t.Optional[c.Callable[[HostProfile], None]] = None,
) -> HostState:
"""
Create new profiles, or load existing ones, and return them.
@@ -139,7 +140,7 @@ def prepare_profiles(
check_controller_python(args, host_state)
if requirements:
- requirements(args, host_state)
+ requirements(host_state.controller_profile)
def configure(profile: HostProfile) -> None:
"""Configure the given profile."""
@@ -148,6 +149,9 @@ def prepare_profiles(
if not skip_setup:
profile.configure()
+ if requirements:
+ requirements(profile)
+
dispatch_jobs([(profile, WrappedThread(functools.partial(configure, profile))) for profile in host_state.target_profiles])
return host_state
@@ -185,13 +189,26 @@ def dispatch_jobs(jobs: list[tuple[HostProfile, WrappedThread]]) -> None:
time.sleep(1)
failed = False
+ connection_failures = 0
for profile, thread in jobs:
try:
thread.wait_for_result()
+ except HostConnectionError as ex:
+ display.error(f'Host {profile.config} connection failed:\n{ex}')
+ failed = True
+ connection_failures += 1
+ except ApplicationError as ex:
+ display.error(f'Host {profile.config} job failed:\n{ex}')
+ failed = True
except Exception as ex: # pylint: disable=broad-except
- display.error(f'Host {profile} job failed: {ex}\n{"".join(traceback.format_tb(ex.__traceback__))}')
+ name = f'{"" if ex.__class__.__module__ == "builtins" else ex.__class__.__module__ + "."}{ex.__class__.__qualname__}'
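+ # e.g. name is "KeyError" for a builtin exception, or "some.module.SomeError" (illustrative) otherwise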
+ display.error(f'Host {profile.config} job failed:\nTraceback (most recent call last):\n'
+ f'{"".join(traceback.format_tb(ex.__traceback__)).rstrip()}\n{name}: {ex}')
failed = True
+ if connection_failures:
+ raise HostConnectionError(f'Host job(s) failed, including {connection_failures} connection failure(s). See previous error(s) for details.')
+
if failed:
raise ApplicationError('Host job(s) failed. See previous error(s) for details.')
diff --git a/test/lib/ansible_test/_internal/ssh.py b/test/lib/ansible_test/_internal/ssh.py
index a5b40c8b..fd01ff25 100644
--- a/test/lib/ansible_test/_internal/ssh.py
+++ b/test/lib/ansible_test/_internal/ssh.py
@@ -2,6 +2,7 @@
from __future__ import annotations
import dataclasses
+import itertools
import json
import os
import random
@@ -38,10 +39,40 @@ class SshConnectionDetail:
identity_file: str
python_interpreter: t.Optional[str] = None
shell_type: t.Optional[str] = None
+ enable_rsa_sha1: bool = False
def __post_init__(self):
self.name = sanitize_host_name(self.name)
+ @property
+ def options(self) -> dict[str, str]:
+ """OpenSSH config options, which can be passed to the `ssh` CLI with the `-o` argument."""
+ options: dict[str, str] = {}
+
+ if self.enable_rsa_sha1:
+ # Newer OpenSSH clients connecting to older SSH servers must explicitly enable ssh-rsa support.
+ # OpenSSH 8.8, released on 2021-09-26, deprecated using RSA with the SHA-1 hash algorithm (ssh-rsa).
+ # OpenSSH 7.2, released on 2016-02-29, added support for using RSA with SHA-256/512 hash algorithms.
+ # See: https://www.openssh.com/txt/release-8.8
+ algorithms = '+ssh-rsa' # append the algorithm to the default list, requires OpenSSH 7.0 or later
+
+ options.update(dict(
+ # Host key signature algorithms that the client wants to use.
+ # Available options can be found with `ssh -Q HostKeyAlgorithms` or `ssh -Q key` on older clients.
+ # This option was updated in OpenSSH 7.0, released on 2015-08-11, to support the "+" prefix.
+ # See: https://www.openssh.com/txt/release-7.0
+ HostKeyAlgorithms=algorithms,
+ # Signature algorithms that will be used for public key authentication.
+ # Available options can be found with `ssh -Q PubkeyAcceptedAlgorithms` or `ssh -Q key` on older clients.
+ # This option was added in OpenSSH 7.0, released on 2015-08-11.
+ # See: https://www.openssh.com/txt/release-7.0
+ # This option is an alias for PubkeyAcceptedAlgorithms, which was added in OpenSSH 8.5.
+ # See: https://www.openssh.com/txt/release-8.5
+ PubkeyAcceptedKeyTypes=algorithms,
+ ))
+
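+ # With enable_rsa_sha1=True the result is:
+ # {'HostKeyAlgorithms': '+ssh-rsa', 'PubkeyAcceptedKeyTypes': '+ssh-rsa'}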
+ return options
+
class SshProcess:
"""Wrapper around an SSH process."""
@@ -141,7 +172,7 @@ def create_ssh_command(
if ssh.user:
cmd.extend(['-l', ssh.user]) # user to log in as on the remote machine
- ssh_options = dict(
+ ssh_options: dict[str, t.Union[int, str]] = dict(
BatchMode='yes',
ExitOnForwardFailure='yes',
LogLevel='ERROR',
@@ -153,9 +184,7 @@ def create_ssh_command(
ssh_options.update(options or {})
- for key, value in sorted(ssh_options.items()):
- cmd.extend(['-o', '='.join([key, str(value)])])
-
+ cmd.extend(ssh_options_to_list(ssh_options))
cmd.extend(cli_args or [])
cmd.append(ssh.host)
@@ -165,6 +194,18 @@ def create_ssh_command(
return cmd
+def ssh_options_to_list(options: t.Union[dict[str, t.Union[int, str]], dict[str, str]]) -> list[str]:
+ """Format a dictionary of SSH options as a list suitable for passing to the `ssh` command."""
+ return list(itertools.chain.from_iterable(
+ ('-o', f'{key}={value}') for key, value in sorted(options.items())
+ ))
+
+
+def ssh_options_to_str(options: t.Union[dict[str, t.Union[int, str]], dict[str, str]]) -> str:
+ """Format a dictionary of SSH options as a string suitable for passing as `ansible_ssh_extra_args` in inventory."""
+ return shlex.join(ssh_options_to_list(options))
+
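+# For example:
+#   ssh_options_to_list({'BatchMode': 'yes', 'Port': 22}) returns ['-o', 'BatchMode=yes', '-o', 'Port=22']
+#   ssh_options_to_str({'BatchMode': 'yes', 'Port': 22}) returns '-o BatchMode=yes -o Port=22'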
+
def run_ssh_command(
args: EnvironmentConfig,
ssh: SshConnectionDetail,
@@ -245,7 +286,7 @@ def generate_ssh_inventory(ssh_connections: list[SshConnectionDetail]) -> str:
ansible_pipelining='yes',
ansible_python_interpreter=ssh.python_interpreter,
ansible_shell_type=ssh.shell_type,
- ansible_ssh_extra_args='-o UserKnownHostsFile=/dev/null', # avoid changing the test environment
+ ansible_ssh_extra_args=ssh_options_to_str(dict(UserKnownHostsFile='/dev/null', **ssh.options)), # avoid changing the test environment
ansible_ssh_host_key_checking='no',
))) for ssh in ssh_connections),
),
diff --git a/test/lib/ansible_test/_internal/target.py b/test/lib/ansible_test/_internal/target.py
index 10dbfd96..4e04b10a 100644
--- a/test/lib/ansible_test/_internal/target.py
+++ b/test/lib/ansible_test/_internal/target.py
@@ -703,6 +703,8 @@ class IntegrationTarget(CompletionTarget):
# configuration
+ self.retry_never = 'retry/never/' in self.aliases
+
self.setup_once = tuple(sorted(set(g.split('/')[2] for g in groups if g.startswith('setup/once/'))))
self.setup_always = tuple(sorted(set(g.split('/')[2] for g in groups if g.startswith('setup/always/'))))
self.needs_target = tuple(sorted(set(g.split('/')[2] for g in groups if g.startswith('needs/target/'))))
diff --git a/test/lib/ansible_test/_internal/thread.py b/test/lib/ansible_test/_internal/thread.py
index db11ad48..d0ed1bab 100644
--- a/test/lib/ansible_test/_internal/thread.py
+++ b/test/lib/ansible_test/_internal/thread.py
@@ -2,6 +2,7 @@
from __future__ import annotations
import collections.abc as c
+import contextlib
import functools
import sys
import threading
@@ -60,3 +61,25 @@ def mutex(func: TCallable) -> TCallable:
return func(*args, **kwargs)
return wrapper # type: ignore[return-value] # requires https://www.python.org/dev/peps/pep-0612/ support
+
+
+__named_lock = threading.Lock()
+__named_locks: dict[str, threading.Lock] = {}
+
+
+@contextlib.contextmanager
+def named_lock(name: str) -> c.Iterator[bool]:
+ """
+ Context manager that provides named locks using threading.Lock instances.
+ Once named lock instances are created they are not deleted.
+ Returns True if this is the first instance of the named lock, otherwise False.
+ """
+ with __named_lock:
+ if lock_instance := __named_locks.get(name):
+ first = False
+ else:
+ first = True
+ lock_instance = __named_locks[name] = threading.Lock()
+
+ with lock_instance:
+ yield first
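+
+
+# Example usage (illustrative):
+#
+#   with named_lock('docker-pull') as first:
+#       if first:
+#           ...  # one-time setup performed only by the first user of this lock name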
diff --git a/test/lib/ansible_test/_internal/util.py b/test/lib/ansible_test/_internal/util.py
index 11bfc107..12316239 100644
--- a/test/lib/ansible_test/_internal/util.py
+++ b/test/lib/ansible_test/_internal/util.py
@@ -946,6 +946,23 @@ class MissingEnvironmentVariable(ApplicationError):
self.name = name
+class HostConnectionError(ApplicationError):
+ """
+ Raised when the initial connection during host profile setup has failed and all retries have been exhausted.
+ Re-raised by the provisioning code when one or more provisioning threads raise this exception.
+ Also raised when an SSH connection fails for the shell command.
+ """
+ def __init__(self, message: str, callback: t.Optional[t.Callable[[], None]] = None) -> None:
+ super().__init__(message)
+
+ self._callback = callback
+
+ def run_callback(self) -> None:
+ """Run the error callback, if any."""
+ if self._callback:
+ self._callback()
+
+
def retry(func, ex_type=SubprocessError, sleep=10, attempts=10, warn=True):
"""Retry the specified function on failure."""
for dummy in range(1, attempts):
diff --git a/test/lib/ansible_test/_util/controller/sanity/pylint/config/ansible-test.cfg b/test/lib/ansible_test/_util/controller/sanity/pylint/config/ansible-test.cfg
index 9424410e..1c03472c 100644
--- a/test/lib/ansible_test/_util/controller/sanity/pylint/config/ansible-test.cfg
+++ b/test/lib/ansible_test/_util/controller/sanity/pylint/config/ansible-test.cfg
@@ -9,6 +9,7 @@ disable=
import-outside-toplevel, # common pattern in ansible related code
raise-missing-from, # Python 2.x does not support raise from
too-few-public-methods,
+ too-many-public-methods,
too-many-arguments,
too-many-branches,
too-many-instance-attributes,
diff --git a/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/ps_argspec.ps1 b/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/ps_argspec.ps1
index 23610e3e..4183b2be 100644
--- a/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/ps_argspec.ps1
+++ b/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/ps_argspec.ps1
@@ -101,13 +101,21 @@ Add-CSharpType -References @(Get-Content -LiteralPath $manifest.ansible_basic -R
$powershell.AddScript($module_code) > $null
$powershell.Invoke() > $null
+$arg_spec = $powershell.Runspace.SessionStateProxy.GetVariable('ansibleTestArgSpec')
+
+if (-not $arg_spec) {
+ $err = $powershell.Streams.Error
+ if ($err) {
+ $err
+ }
+ else {
+ "Unknown error trying to get PowerShell arg spec"
+ }
-if ($powershell.HadErrors) {
- $powershell.Streams.Error
exit 1
}
-$arg_spec = $powershell.Runspace.SessionStateProxy.GetVariable('ansibleTestArgSpec')
+
Resolve-CircularReference -Hash $arg_spec
ConvertTo-Json -InputObject $arg_spec -Compress -Depth 99
diff --git a/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/utils.py b/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/utils.py
index 5b20db8d..88d5b01a 100644
--- a/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/utils.py
+++ b/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/utils.py
@@ -154,11 +154,9 @@ def parse_yaml(value, lineno, module, name, load_all=False, ansible_loader=False
if load_all:
data = list(data)
except yaml.MarkedYAMLError as e:
- e.problem_mark.line += lineno - 1
- e.problem_mark.name = '%s.%s' % (module, name)
errors.append({
'msg': '%s is not valid YAML' % name,
- 'line': e.problem_mark.line + 1,
+ 'line': e.problem_mark.line + lineno,
'column': e.problem_mark.column + 1
})
traces.append(e)
diff --git a/test/lib/ansible_test/_util/target/setup/bootstrap.sh b/test/lib/ansible_test/_util/target/setup/bootstrap.sh
index b1be8436..732c122a 100644
--- a/test/lib/ansible_test/_util/target/setup/bootstrap.sh
+++ b/test/lib/ansible_test/_util/target/setup/bootstrap.sh
@@ -427,6 +427,9 @@ bootstrap()
install_ssh_keys
customize_bashrc
+ # allow tests to detect ansible-test bootstrapped instances, as well as the bootstrap type
+ echo "${bootstrap_type}" > /etc/ansible-test.bootstrap
+
case "${bootstrap_type}" in
"docker") bootstrap_docker ;;
"remote") bootstrap_remote ;;
diff --git a/test/lib/ansible_test/_util/target/setup/check_systemd_cgroup_v1.sh b/test/lib/ansible_test/_util/target/setup/check_systemd_cgroup_v1.sh
new file mode 100644
index 00000000..3b05a3f4
--- /dev/null
+++ b/test/lib/ansible_test/_util/target/setup/check_systemd_cgroup_v1.sh
@@ -0,0 +1,17 @@
+# shellcheck shell=sh
+
+set -eu
+
+>&2 echo "@MARKER@"
+
+cgroup_path="$(awk -F: '$2 ~ /^name=systemd$/ { print "/sys/fs/cgroup/systemd"$3 }' /proc/1/cgroup)"
+
+if [ "${cgroup_path}" ] && [ -d "${cgroup_path}" ]; then
+ probe_path="${cgroup_path%/}/ansible-test-probe-@LABEL@"
+ mkdir "${probe_path}"
+ rmdir "${probe_path}"
+ exit 0
+fi
+
+>&2 echo "No systemd cgroup v1 hierarchy found"
+exit 1
diff --git a/test/lib/ansible_test/_util/target/setup/probe_cgroups.py b/test/lib/ansible_test/_util/target/setup/probe_cgroups.py
new file mode 100644
index 00000000..2ac7ecb0
--- /dev/null
+++ b/test/lib/ansible_test/_util/target/setup/probe_cgroups.py
@@ -0,0 +1,31 @@
+"""A tool for probing cgroups to determine write access."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import sys
+
+
+def main(): # type: () -> None
+ """Main program entry point."""
+ probe_dir = sys.argv[1]
+ paths = sys.argv[2:]
+ results = {}
+
+ for path in paths:
+ probe_path = os.path.join(path, probe_dir)
+
+ try:
+ os.mkdir(probe_path)
+ os.rmdir(probe_path)
+ except Exception as ex: # pylint: disable=broad-except
+ results[path] = str(ex)
+ else:
+ results[path] = None
+
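+ # Example output (illustrative):
+ #   {"/sys/fs/cgroup/systemd": null, "/sys/fs/cgroup/unified": "[Errno 13] Permission denied: '/sys/fs/cgroup/unified/probe'"}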
+ print(json.dumps(results, sort_keys=True))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/units/galaxy/test_collection.py b/test/units/galaxy/test_collection.py
index 28a69b28..106251c5 100644
--- a/test/units/galaxy/test_collection.py
+++ b/test/units/galaxy/test_collection.py
@@ -201,24 +201,6 @@ def manifest(manifest_info):
yield fake_file, sha256(b_data).hexdigest()
-@pytest.fixture()
-def server_config(monkeypatch):
- monkeypatch.setattr(C, 'GALAXY_SERVER_LIST', ['server1', 'server2', 'server3'])
-
- default_options = dict((k, None) for k, v, t in SERVER_DEF)
-
- server1 = dict(default_options)
- server1.update({'url': 'https://galaxy.ansible.com/api/', 'validate_certs': False})
-
- server2 = dict(default_options)
- server2.update({'url': 'https://galaxy.ansible.com/api/', 'validate_certs': True})
-
- server3 = dict(default_options)
- server3.update({'url': 'https://galaxy.ansible.com/api/'})
-
- return server1, server2, server3
-
-
@pytest.mark.parametrize(
'required_signature_count,valid',
[
@@ -340,8 +322,18 @@ def test_validate_certs(global_ignore_certs, monkeypatch):
assert galaxy_cli.api_servers[0].validate_certs is not global_ignore_certs
-@pytest.mark.parametrize('global_ignore_certs', [True, False])
-def test_validate_certs_with_server_url(global_ignore_certs, monkeypatch):
+@pytest.mark.parametrize(
+ ["ignore_certs_cli", "ignore_certs_cfg", "expected_validate_certs"],
+ [
+ (None, None, True),
+ (None, True, False),
+ (None, False, True),
+ (True, None, False),
+ (True, True, False),
+ (True, False, False),
+ ]
+)
+def test_validate_certs_with_server_url(ignore_certs_cli, ignore_certs_cfg, expected_validate_certs, monkeypatch):
cli_args = [
'ansible-galaxy',
'collection',
@@ -350,8 +342,10 @@ def test_validate_certs_with_server_url(global_ignore_certs, monkeypatch):
'-s',
'https://galaxy.ansible.com'
]
- if global_ignore_certs:
+ if ignore_certs_cli:
cli_args.append('--ignore-certs')
+ if ignore_certs_cfg is not None:
+ monkeypatch.setattr(C, 'GALAXY_IGNORE_CERTS', ignore_certs_cfg)
galaxy_cli = GalaxyCLI(args=cli_args)
mock_execute_install = MagicMock()
@@ -359,34 +353,62 @@ def test_validate_certs_with_server_url(global_ignore_certs, monkeypatch):
galaxy_cli.run()
assert len(galaxy_cli.api_servers) == 1
- assert galaxy_cli.api_servers[0].validate_certs is not global_ignore_certs
-
-
-@pytest.mark.parametrize('global_ignore_certs', [True, False])
-def test_validate_certs_with_server_config(global_ignore_certs, server_config, monkeypatch):
+ assert galaxy_cli.api_servers[0].validate_certs == expected_validate_certs
- # test sidesteps real resolution and forces the server config to override the cli option
- get_plugin_options = MagicMock(side_effect=server_config)
- monkeypatch.setattr(C.config, 'get_plugin_options', get_plugin_options)
+@pytest.mark.parametrize(
+ ["ignore_certs_cli", "ignore_certs_cfg", "expected_server2_validate_certs", "expected_server3_validate_certs"],
+ [
+ (None, None, True, True),
+ (None, True, True, False),
+ (None, False, True, True),
+ (True, None, False, False),
+ (True, True, False, False),
+ (True, False, False, False),
+ ]
+)
+def test_validate_certs_server_config(ignore_certs_cfg, ignore_certs_cli, expected_server2_validate_certs, expected_server3_validate_certs, monkeypatch):
+ server_names = ['server1', 'server2', 'server3']
+ cfg_lines = [
+ "[galaxy]",
+ "server_list=server1,server2,server3",
+ "[galaxy_server.server1]",
+ "url=https://galaxy.ansible.com/api/",
+ "validate_certs=False",
+ "[galaxy_server.server2]",
+ "url=https://galaxy.ansible.com/api/",
+ "validate_certs=True",
+ "[galaxy_server.server3]",
+ "url=https://galaxy.ansible.com/api/",
+ ]
cli_args = [
'ansible-galaxy',
'collection',
'install',
'namespace.collection:1.0.0',
]
- if global_ignore_certs:
+ if ignore_certs_cli:
cli_args.append('--ignore-certs')
+ if ignore_certs_cfg is not None:
+ monkeypatch.setattr(C, 'GALAXY_IGNORE_CERTS', ignore_certs_cfg)
- galaxy_cli = GalaxyCLI(args=cli_args)
- mock_execute_install = MagicMock()
- monkeypatch.setattr(galaxy_cli, '_execute_install_collection', mock_execute_install)
- galaxy_cli.run()
+ monkeypatch.setattr(C, 'GALAXY_SERVER_LIST', server_names)
+
+ with tempfile.NamedTemporaryFile(suffix='.cfg') as tmp_file:
+ tmp_file.write(to_bytes('\n'.join(cfg_lines), errors='surrogate_or_strict'))
+ tmp_file.flush()
+
+ monkeypatch.setattr(C.config, '_config_file', tmp_file.name)
+ C.config._parse_config_file()
+ galaxy_cli = GalaxyCLI(args=cli_args)
+ mock_execute_install = MagicMock()
+ monkeypatch.setattr(galaxy_cli, '_execute_install_collection', mock_execute_install)
+ galaxy_cli.run()
- # server cfg, so should match def above, if not specified so it should use default (true)
- assert galaxy_cli.api_servers[0].validate_certs is server_config[0].get('validate_certs', True)
- assert galaxy_cli.api_servers[1].validate_certs is server_config[1].get('validate_certs', True)
- assert galaxy_cli.api_servers[2].validate_certs is server_config[2].get('validate_certs', True)
+ # Precedence: --ignore-certs CLI option > per-server validate_certs > GALAXY_IGNORE_CERTS config > default (True)
+ assert galaxy_cli.api_servers[0].validate_certs is False
+ assert galaxy_cli.api_servers[1].validate_certs is expected_server2_validate_certs
+ assert galaxy_cli.api_servers[2].validate_certs is expected_server3_validate_certs
def test_build_collection_no_galaxy_yaml():
diff --git a/test/units/module_utils/common/arg_spec/test_aliases.py b/test/units/module_utils/common/arg_spec/test_aliases.py
index 1c1e243a..7d30fb0f 100644
--- a/test/units/module_utils/common/arg_spec/test_aliases.py
+++ b/test/units/module_utils/common/arg_spec/test_aliases.py
@@ -57,7 +57,12 @@ ALIAS_TEST_CASES = [
'path': '/tmp',
'not_yo_path': '/tmp',
},
- {'version': '1.7', 'date': None, 'collection_name': None, 'name': 'not_yo_path'},
+ {
+ 'version': '1.7',
+ 'date': None,
+ 'collection_name': None,
+ 'msg': "Alias 'not_yo_path' is deprecated. See the module docs for more information",
+ },
"",
)
]
diff --git a/test/units/module_utils/common/arg_spec/test_module_validate.py b/test/units/module_utils/common/arg_spec/test_module_validate.py
index 5041d521..2c2211c9 100644
--- a/test/units/module_utils/common/arg_spec/test_module_validate.py
+++ b/test/units/module_utils/common/arg_spec/test_module_validate.py
@@ -49,7 +49,7 @@ def test_module_alias_deprecations_warnings(monkeypatch):
{
'collection_name': None,
'date': '2020-03-04',
- 'name': 'flamethrower',
+ 'msg': "Alias 'flamethrower' is deprecated. See the module docs for more information",
'version': None,
}
]