author    Lee Garrett <lgarrett@rocketjump.eu>  2023-01-31 00:05:27 +0100
committer Lee Garrett <lgarrett@rocketjump.eu>  2023-01-31 00:05:27 +0100
commit    520506f035967306d0548a8f69a0fea3181dca35 (patch)
tree      6bc71745aeee6208f8382bd067b6c9e1c9a02e2a /test/integration/targets
parent    46bbbf9f8e527b7ab4329a0aa16e3d38bfbb0c13 (diff)
download  debian-ansible-core-520506f035967306d0548a8f69a0fea3181dca35.zip
New upstream version 2.14.2
Diffstat (limited to 'test/integration/targets')
-rw-r--r--  test/integration/targets/ansible-test-container/aliases | 5
-rwxr-xr-x  test/integration/targets/ansible-test-container/runme.py | 1090
-rwxr-xr-x  test/integration/targets/ansible-test-container/runme.sh | 5
-rw-r--r--  test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/col/plugins/modules/invalid_yaml_syntax.py | 27
-rw-r--r--  test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/failure/README.rst | 3
-rw-r--r--  test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/failure/galaxy.yml | 6
-rw-r--r--  test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/failure/meta/main.yml | 1
-rw-r--r--  test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/failure/plugins/modules/failure_ps.ps1 | 16
-rw-r--r--  test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/failure/plugins/modules/failure_ps.yml | 31
-rw-r--r--  test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/ps_only/plugins/module_utils/share_module.psm1 | 19
-rw-r--r--  test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/ps_only/plugins/modules/in_function.ps1 | 7
-rw-r--r--  test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/ps_only/plugins/modules/in_function.yml | 25
-rw-r--r--  test/integration/targets/ansible-test-sanity-validate-modules/expected.txt | 5
-rwxr-xr-x  test/integration/targets/ansible-test-sanity-validate-modules/runme.sh | 19
-rw-r--r--  test/integration/targets/argspec/library/argspec.py | 104
-rw-r--r--  test/integration/targets/argspec/tasks/main.yml | 211
-rw-r--r--  test/integration/targets/blocks/79711.yml | 17
-rwxr-xr-x  test/integration/targets/blocks/runme.sh | 9
-rw-r--r--  test/integration/targets/file/tasks/main.yml | 76
-rw-r--r--  test/integration/targets/get_url/tasks/ciphers.yml | 2
-rw-r--r--  test/integration/targets/inventory_script/inventory.json | 4
-rw-r--r--  test/integration/targets/reboot/aliases | 7
-rw-r--r--  test/integration/targets/reboot/tasks/main.yml | 68
-rw-r--r--  test/integration/targets/roles_arg_spec/test_complex_role_fails.yml | 27
-rw-r--r--  test/integration/targets/setup_epel/tasks/main.yml | 5
-rw-r--r--  test/integration/targets/var_templating/ansible_debug_template.j2 | 1
-rwxr-xr-x  test/integration/targets/var_templating/runme.sh | 3
-rw-r--r--  test/integration/targets/var_templating/test_vars_with_sources.yml | 9
-rw-r--r--  test/integration/targets/yum/tasks/yuminstallroot.yml | 9
29 files changed, 1762 insertions(+), 49 deletions(-)
diff --git a/test/integration/targets/ansible-test-container/aliases b/test/integration/targets/ansible-test-container/aliases
new file mode 100644
index 00000000..65a05093
--- /dev/null
+++ b/test/integration/targets/ansible-test-container/aliases
@@ -0,0 +1,5 @@
+shippable/posix/group6
+context/controller
+needs/root
+destructive
+retry/never # tests on some platforms run too long to make retries useful
diff --git a/test/integration/targets/ansible-test-container/runme.py b/test/integration/targets/ansible-test-container/runme.py
new file mode 100755
index 00000000..68712805
--- /dev/null
+++ b/test/integration/targets/ansible-test-container/runme.py
@@ -0,0 +1,1090 @@
+#!/usr/bin/env python
+"""Test suite used to verify ansible-test is able to run its containers on various container hosts."""
+
+from __future__ import annotations
+
+import abc
+import dataclasses
+import datetime
+import errno
+import functools
+import json
+import os
+import pathlib
+import pwd
+import re
+import secrets
+import shlex
+import shutil
+import signal
+import subprocess
+import sys
+import time
+import typing as t
+
+UNPRIVILEGED_USER_NAME = 'ansible-test'
+CGROUP_SYSTEMD = pathlib.Path('/sys/fs/cgroup/systemd')
+LOG_PATH = pathlib.Path('/tmp/results')
+
+# The value of /proc/*/loginuid when it is not set.
+# It is a reserved UID, which is the maximum 32-bit unsigned integer value.
+# See: https://access.redhat.com/solutions/25404
+LOGINUID_NOT_SET = 4294967295
+
+UID = os.getuid()
+
+try:
+ LOGINUID = int(pathlib.Path('/proc/self/loginuid').read_text())
+ LOGINUID_MISMATCH = LOGINUID != LOGINUID_NOT_SET and LOGINUID != UID
+except FileNotFoundError:
+ LOGINUID = None
+ LOGINUID_MISMATCH = False
+
+
+def main() -> None:
+ """Main program entry point."""
+ display.section('Startup check')
+
+ try:
+ bootstrap_type = pathlib.Path('/etc/ansible-test.bootstrap').read_text().strip()
+ except FileNotFoundError:
+ bootstrap_type = 'undefined'
+
+ display.info(f'Bootstrap type: {bootstrap_type}')
+
+ if bootstrap_type != 'remote':
+        display.warning('Skipping destructive test on a system that is not an ansible-test remote provisioned instance.')
+ return
+
+ display.info(f'UID: {UID} / {LOGINUID}')
+
+ if UID != 0:
+ raise Exception('This test must be run as root.')
+
+ if not LOGINUID_MISMATCH:
+ if LOGINUID is None:
+ display.warning('Tests involving loginuid mismatch will be skipped on this host since it does not have audit support.')
+ elif LOGINUID == LOGINUID_NOT_SET:
+ display.warning('Tests involving loginuid mismatch will be skipped on this host since it is not set.')
+ elif LOGINUID == 0:
+ raise Exception('Use sudo, su, etc. as a non-root user to become root before running this test.')
+ else:
+ raise Exception()
+
+ display.section(f'Bootstrapping {os_release}')
+
+ bootstrapper = Bootstrapper.init()
+ bootstrapper.run()
+
+ result_dir = LOG_PATH
+
+ if result_dir.exists():
+ shutil.rmtree(result_dir)
+
+ result_dir.mkdir()
+ result_dir.chmod(0o777)
+
+ scenarios = get_test_scenarios()
+ results = [run_test(scenario) for scenario in scenarios]
+ error_total = 0
+
+ for name in sorted(result_dir.glob('*.log')):
+ lines = name.read_text().strip().splitlines()
+ error_count = len([line for line in lines if line.startswith('FAIL: ')])
+ error_total += error_count
+
+ display.section(f'Log ({error_count=}/{len(lines)}): {name.name}')
+
+ for line in lines:
+ if line.startswith('FAIL: '):
+ display.show(line, display.RED)
+ else:
+ display.show(line)
+
+ error_count = len([result for result in results if result.message])
+ error_total += error_count
+
+ duration = datetime.timedelta(seconds=int(sum(result.duration.total_seconds() for result in results)))
+
+ display.section(f'Test Results ({error_count=}/{len(results)}) [{duration}]')
+
+ for result in results:
+ notes = f' <cleanup: {", ".join(result.cleanup)}>' if result.cleanup else ''
+
+ if result.cgroup_dirs:
+ notes += f' <cgroup_dirs: {len(result.cgroup_dirs)}>'
+
+ notes += f' [{result.duration}]'
+
+ if result.message:
+ display.show(f'FAIL: {result.scenario} {result.message}{notes}', display.RED)
+ elif result.duration.total_seconds() >= 90:
+ display.show(f'SLOW: {result.scenario}{notes}', display.YELLOW)
+ else:
+ display.show(f'PASS: {result.scenario}{notes}')
+
+ if error_total:
+ sys.exit(1)
+
+
+def get_test_scenarios() -> list[TestScenario]:
+ """Generate and return a list of test scenarios."""
+
+ supported_engines = ('docker', 'podman')
+ available_engines = [engine for engine in supported_engines if shutil.which(engine)]
+
+ if not available_engines:
+ raise ApplicationError(f'No supported container engines found: {", ".join(supported_engines)}')
+
+ completion_lines = pathlib.Path(os.environ['PYTHONPATH'], '../test/lib/ansible_test/_data/completion/docker.txt').read_text().splitlines()
+
+ # TODO: consider including testing for the collection default image
+ entries = {name: value for name, value in (parse_completion_entry(line) for line in completion_lines) if name != 'default'}
+
+ unprivileged_user = User.get(UNPRIVILEGED_USER_NAME)
+
+ scenarios: list[TestScenario] = []
+
+ for container_name, settings in entries.items():
+ image = settings['image']
+ cgroup = settings.get('cgroup', 'v1-v2')
+
+ if container_name == 'centos6' and os_release.id == 'alpine':
+ # Alpine kernels do not emulate vsyscall by default, which causes the centos6 container to fail during init.
+ # See: https://unix.stackexchange.com/questions/478387/running-a-centos-docker-image-on-arch-linux-exits-with-code-139
+ # Other distributions enable settings which trap vsyscall by default.
+ # See: https://www.kernelconfig.io/config_legacy_vsyscall_xonly
+ # See: https://www.kernelconfig.io/config_legacy_vsyscall_emulate
+ continue
+
+ for engine in available_engines:
+ # TODO: figure out how to get tests passing using docker without disabling selinux
+ disable_selinux = os_release.id == 'fedora' and engine == 'docker' and cgroup != 'none'
+ expose_cgroup_v1 = cgroup == 'v1-only' and get_docker_info(engine).cgroup_version != 1
+ debug_systemd = cgroup != 'none'
+
+ # The sleep+pkill used to support the cgroup probe causes problems with the centos6 container.
+ # It results in sshd connections being refused or reset for many, but not all, container instances.
+ # The underlying cause of this issue is unknown.
+ probe_cgroups = container_name != 'centos6'
+
+ # The default RHEL 9 crypto policy prevents use of SHA-1.
+ # This results in SSH errors with centos6 containers: ssh_dispatch_run_fatal: Connection to 1.2.3.4 port 22: error in libcrypto
+ # See: https://access.redhat.com/solutions/6816771
+ enable_sha1 = os_release.id == 'rhel' and os_release.version_id.startswith('9.') and container_name == 'centos6'
+
+ if cgroup != 'none' and get_docker_info(engine).cgroup_version == 1 and not have_cgroup_systemd():
+ expose_cgroup_v1 = True # the host uses cgroup v1 but there is no systemd cgroup and the container requires cgroup support
+
+ user_scenarios = [
+ # TODO: test rootless docker
+ UserScenario(ssh=unprivileged_user),
+ ]
+
+ if engine == 'podman':
+ user_scenarios.append(UserScenario(ssh=ROOT_USER))
+
+ # TODO: test podman remote on Alpine and Ubuntu hosts
+ # TODO: combine remote with ssh using different unprivileged users
+ if os_release.id not in ('alpine', 'ubuntu'):
+ user_scenarios.append(UserScenario(remote=unprivileged_user))
+
+ if LOGINUID_MISMATCH:
+ user_scenarios.append(UserScenario())
+
+ for user_scenario in user_scenarios:
+ scenarios.append(
+ TestScenario(
+ user_scenario=user_scenario,
+ engine=engine,
+ container_name=container_name,
+ image=image,
+ disable_selinux=disable_selinux,
+ expose_cgroup_v1=expose_cgroup_v1,
+ enable_sha1=enable_sha1,
+ debug_systemd=debug_systemd,
+ probe_cgroups=probe_cgroups,
+ )
+ )
+
+ return scenarios
+
+
+def run_test(scenario: TestScenario) -> TestResult:
+ """Run a test scenario and return the test results."""
+ display.section(f'Testing {scenario} Started')
+
+ start = time.monotonic()
+
+ integration = ['ansible-test', 'integration', 'split']
+ integration_options = ['--target', f'docker:{scenario.container_name}', '--color', '--truncate', '0', '-v']
+ target_only_options = []
+
+ if scenario.debug_systemd:
+ integration_options.append('--dev-systemd-debug')
+
+ if scenario.probe_cgroups:
+ target_only_options = ['--dev-probe-cgroups', str(LOG_PATH)]
+
+ commands = [
+ # The cgroup probe is only performed for the first test of the target.
+ # There's no need to repeat the probe again for the same target.
+ # The controller will be tested separately as a target.
+ # This ensures that both the probe and no-probe code paths are functional.
+ [*integration, *integration_options, *target_only_options],
+ # For the split test we'll use alpine3 as the controller. There are two reasons for this:
+ # 1) It doesn't require the cgroup v1 hack, so we can test a target that doesn't need that.
+ # 2) It doesn't require disabling selinux, so we can test a target that doesn't need that.
+ [*integration, '--controller', 'docker:alpine3', *integration_options],
+ ]
+
+ common_env: dict[str, str] = {}
+ test_env: dict[str, str] = {}
+
+ if scenario.engine == 'podman':
+ if scenario.user_scenario.remote:
+ common_env.update(
+ # Podman 4.3.0 has a regression which requires a port for remote connections to work.
+ # See: https://github.com/containers/podman/issues/16509
+ CONTAINER_HOST=f'ssh://{scenario.user_scenario.remote.name}@localhost:22'
+ f'/run/user/{scenario.user_scenario.remote.pwnam.pw_uid}/podman/podman.sock',
+ CONTAINER_SSHKEY=str(pathlib.Path('~/.ssh/id_rsa').expanduser()), # TODO: add support for ssh + remote when the ssh user is not root
+ )
+
+ test_env.update(ANSIBLE_TEST_PREFER_PODMAN='1')
+
+ test_env.update(common_env)
+
+ if scenario.user_scenario.ssh:
+ client_become_cmd = ['ssh', f'{scenario.user_scenario.ssh.name}@localhost']
+ test_commands = [client_become_cmd + [f'cd ~/ansible; {format_env(test_env)}{sys.executable} bin/{shlex.join(command)}'] for command in commands]
+ else:
+ client_become_cmd = ['sh', '-c']
+ test_commands = [client_become_cmd + [f'{format_env(test_env)}{shlex.join(command)}'] for command in commands]
+
+ prime_storage_command = []
+
+ if scenario.engine == 'podman' and scenario.user_scenario.actual.name == UNPRIVILEGED_USER_NAME:
+ # When testing podman we need to make sure that the overlay filesystem is used instead of vfs.
+ # Using the vfs filesystem will result in running out of disk space during the tests.
+ # To change the filesystem used, the existing storage directory must be removed before "priming" the storage database.
+ #
+ # Without this change the following message may be displayed:
+ #
+ # User-selected graph driver "overlay" overwritten by graph driver "vfs" from database - delete libpod local files to resolve
+ #
+ # However, with this change it may be replaced with the following message:
+ #
+ # User-selected graph driver "vfs" overwritten by graph driver "overlay" from database - delete libpod local files to resolve
+
+ actual_become_cmd = ['ssh', f'{scenario.user_scenario.actual.name}@localhost']
+ prime_storage_command = actual_become_cmd + prepare_prime_podman_storage()
+
+ message = ''
+
+ if scenario.expose_cgroup_v1:
+ prepare_cgroup_systemd(scenario.user_scenario.actual.name, scenario.engine)
+
+ try:
+ if prime_storage_command:
+ retry_command(lambda: run_command(*prime_storage_command), retry_any_error=True)
+
+ if scenario.disable_selinux:
+ run_command('setenforce', 'permissive')
+
+ if scenario.enable_sha1:
+ run_command('update-crypto-policies', '--set', 'DEFAULT:SHA1')
+
+ for test_command in test_commands:
+ retry_command(lambda: run_command(*test_command))
+ except SubprocessError as ex:
+ message = str(ex)
+ display.error(f'{scenario} {message}')
+ finally:
+ if scenario.enable_sha1:
+ run_command('update-crypto-policies', '--set', 'DEFAULT')
+
+ if scenario.disable_selinux:
+ run_command('setenforce', 'enforcing')
+
+ if scenario.expose_cgroup_v1:
+ dirs = remove_cgroup_systemd()
+ else:
+ dirs = list_group_systemd()
+
+ cleanup_command = [scenario.engine, 'rmi', '-f', scenario.image]
+
+ try:
+ retry_command(lambda: run_command(*client_become_cmd + [f'{format_env(common_env)}{shlex.join(cleanup_command)}']), retry_any_error=True)
+ except SubprocessError as ex:
+ display.error(str(ex))
+
+ cleanup = cleanup_podman() if scenario.engine == 'podman' else tuple()
+
+ finish = time.monotonic()
+ duration = datetime.timedelta(seconds=int(finish - start))
+
+ display.section(f'Testing {scenario} Completed in {duration}')
+
+ return TestResult(
+ scenario=scenario,
+ message=message,
+ cleanup=cleanup,
+ duration=duration,
+ cgroup_dirs=tuple(str(path) for path in dirs),
+ )
+
+
+def prepare_prime_podman_storage() -> list[str]:
+ """Partially prime podman storage and return a command to complete the remainder."""
+ prime_storage_command = ['rm -rf ~/.local/share/containers; STORAGE_DRIVER=overlay podman pull quay.io/bedrock/alpine:3.16.2']
+
+ test_containers = pathlib.Path(f'~{UNPRIVILEGED_USER_NAME}/.local/share/containers').expanduser()
+
+ if test_containers.is_dir():
+ # First remove the directory as root, since the user may not have permissions on all the files.
+ # The directory will be removed again after login, before initializing the database.
+ rmtree(test_containers)
+
+ return prime_storage_command
+
+
+def cleanup_podman() -> tuple[str, ...]:
+ """Cleanup podman processes and files on disk."""
+ cleanup = []
+
+ for remaining in range(3, -1, -1):
+ processes = [(int(item[0]), item[1]) for item in
+ [item.split(maxsplit=1) for item in run_command('ps', '-A', '-o', 'pid,comm', capture=True).stdout.splitlines()]
+ if pathlib.Path(item[1].split()[0]).name in ('catatonit', 'podman', 'conmon')]
+
+ if not processes:
+ break
+
+ for pid, name in processes:
+ display.info(f'Killing "{name}" ({pid}) ...')
+
+ try:
+ os.kill(pid, signal.SIGTERM if remaining > 1 else signal.SIGKILL)
+ except ProcessLookupError:
+ pass
+
+ cleanup.append(name)
+
+ time.sleep(1)
+ else:
+ raise Exception('failed to kill all matching processes')
+
+ uid = pwd.getpwnam(UNPRIVILEGED_USER_NAME).pw_uid
+
+ container_tmp = pathlib.Path(f'/tmp/containers-user-{uid}')
+ podman_tmp = pathlib.Path(f'/tmp/podman-run-{uid}')
+
+ user_config = pathlib.Path(f'~{UNPRIVILEGED_USER_NAME}/.config').expanduser()
+ user_local = pathlib.Path(f'~{UNPRIVILEGED_USER_NAME}/.local').expanduser()
+
+ if container_tmp.is_dir():
+ rmtree(container_tmp)
+
+ if podman_tmp.is_dir():
+ rmtree(podman_tmp)
+
+ if user_config.is_dir():
+ rmtree(user_config)
+
+ if user_local.is_dir():
+ rmtree(user_local)
+
+ return tuple(sorted(set(cleanup)))
+
+
+def have_cgroup_systemd() -> bool:
+ """Return True if the container host has a systemd cgroup."""
+ return pathlib.Path(CGROUP_SYSTEMD).is_dir()
+
+
+def prepare_cgroup_systemd(username: str, engine: str) -> None:
+ """Prepare the systemd cgroup."""
+ CGROUP_SYSTEMD.mkdir()
+
+ run_command('mount', 'cgroup', '-t', 'cgroup', str(CGROUP_SYSTEMD), '-o', 'none,name=systemd,xattr', capture=True)
+
+ if engine == 'podman':
+ run_command('chown', '-R', f'{username}:{username}', str(CGROUP_SYSTEMD))
+
+ run_command('find', str(CGROUP_SYSTEMD), '-type', 'd', '-exec', 'ls', '-l', '{}', ';')
+
+
+def list_group_systemd() -> list[pathlib.Path]:
+ """List the systemd cgroup."""
+ dirs = set()
+
+ for dirpath, dirnames, filenames in os.walk(CGROUP_SYSTEMD, topdown=False):
+ for dirname in dirnames:
+ target_path = pathlib.Path(dirpath, dirname)
+ display.info(f'dir: {target_path}')
+ dirs.add(target_path)
+
+ return sorted(dirs)
+
+
+def remove_cgroup_systemd() -> list[pathlib.Path]:
+ """Remove the systemd cgroup."""
+ dirs = set()
+
+ for sleep_seconds in range(1, 10):
+ try:
+ for dirpath, dirnames, filenames in os.walk(CGROUP_SYSTEMD, topdown=False):
+ for dirname in dirnames:
+ target_path = pathlib.Path(dirpath, dirname)
+ display.info(f'rmdir: {target_path}')
+ dirs.add(target_path)
+ target_path.rmdir()
+ except OSError as ex:
+ if ex.errno != errno.EBUSY:
+ raise
+
+ error = str(ex)
+ else:
+ break
+
+ display.warning(f'{error} -- sleeping for {sleep_seconds} second(s) before trying again ...') # pylint: disable=used-before-assignment
+
+ time.sleep(sleep_seconds)
+
+ time.sleep(1) # allow time for cgroups to be fully removed before unmounting
+
+ run_command('umount', str(CGROUP_SYSTEMD))
+
+ CGROUP_SYSTEMD.rmdir()
+
+ time.sleep(1) # allow time for cgroup hierarchy to be removed after unmounting
+
+ cgroup = pathlib.Path('/proc/self/cgroup').read_text()
+
+ if 'systemd' in cgroup:
+ raise Exception('systemd hierarchy detected')
+
+ return sorted(dirs)
+
+
+def rmtree(path: pathlib.Path) -> None:
+ """Wrapper around shutil.rmtree with additional error handling."""
+ for retries in range(10, -1, -1):
+ try:
+ display.info(f'rmtree: {path} ({retries} attempts remaining) ... ')
+ shutil.rmtree(path)
+ except Exception:
+ if not path.exists():
+ display.info(f'rmtree: {path} (not found)')
+ return
+
+ if not path.is_dir():
+ display.info(f'rmtree: {path} (not a directory)')
+ return
+
+ if retries:
+ continue
+
+ raise
+ else:
+ display.info(f'rmtree: {path} (done)')
+ return
+
+
+def format_env(env: dict[str, str]) -> str:
+ """Format an env dict for injection into a shell command and return the resulting string."""
+ if env:
+ return ' '.join(f'{shlex.quote(key)}={shlex.quote(value)}' for key, value in env.items()) + ' '
+
+ return ''
+
+
+class DockerInfo:
+ """The results of `docker info` for the container runtime."""
+
+ def __init__(self, data: dict[str, t.Any]) -> None:
+ self.data = data
+
+ @property
+ def cgroup_version(self) -> int:
+ """The cgroup version of the container host."""
+ data = self.data
+ host = data.get('host')
+
+ if host:
+ version = int(host['cgroupVersion'].lstrip('v')) # podman
+ else:
+ version = int(data['CgroupVersion']) # docker
+
+ return version
+
+
+@functools.lru_cache
+def get_docker_info(engine: str) -> DockerInfo:
+ """Return info for the current container runtime. The results are cached."""
+ return DockerInfo(json.loads(run_command(engine, 'info', '--format', '{{ json . }}', capture=True).stdout))
+
+
+@dataclasses.dataclass(frozen=True)
+class User:
+ name: str
+ pwnam: pwd.struct_passwd
+
+ @classmethod
+ def get(cls, name: str) -> User:
+ return User(
+ name=name,
+ pwnam=pwd.getpwnam(name),
+ )
+
+
+@dataclasses.dataclass(frozen=True)
+class UserScenario:
+ ssh: User = None
+ remote: User = None
+
+ @property
+ def actual(self) -> User:
+ return self.remote or self.ssh or ROOT_USER
+
+
+@dataclasses.dataclass(frozen=True)
+class TestScenario:
+ user_scenario: UserScenario
+ engine: str
+ container_name: str
+ image: str
+ disable_selinux: bool
+ expose_cgroup_v1: bool
+ enable_sha1: bool
+ debug_systemd: bool
+ probe_cgroups: bool
+
+ @property
+ def tags(self) -> tuple[str, ...]:
+ tags = []
+
+ if self.user_scenario.ssh:
+ tags.append(f'ssh: {self.user_scenario.ssh.name}')
+
+ if self.user_scenario.remote:
+ tags.append(f'remote: {self.user_scenario.remote.name}')
+
+ if self.disable_selinux:
+ tags.append('selinux: permissive')
+
+ if self.expose_cgroup_v1:
+ tags.append('cgroup: v1')
+
+ if self.enable_sha1:
+ tags.append('sha1: enabled')
+
+ return tuple(tags)
+
+ @property
+ def tag_label(self) -> str:
+ return ' '.join(f'[{tag}]' for tag in self.tags)
+
+ def __str__(self):
+ return f'[{self.container_name}] ({self.engine}) {self.tag_label}'.strip()
+
+
+@dataclasses.dataclass(frozen=True)
+class TestResult:
+ scenario: TestScenario
+ message: str
+ cleanup: tuple[str, ...]
+ duration: datetime.timedelta
+ cgroup_dirs: tuple[str, ...]
+
+
+def parse_completion_entry(value: str) -> tuple[str, dict[str, str]]:
+ """Parse the given completion entry, returning the entry name and a dictionary of key/value settings."""
+ values = value.split()
+
+ name = values[0]
+ data = {kvp[0]: kvp[1] if len(kvp) > 1 else '' for kvp in [item.split('=', 1) for item in values[1:]]}
+
+ return name, data
+
+
+@dataclasses.dataclass(frozen=True)
+class SubprocessResult:
+ """Result from execution of a subprocess."""
+
+ command: list[str]
+ stdout: str
+ stderr: str
+ status: int
+
+
+class ApplicationError(Exception):
+ """An application error."""
+
+ def __init__(self, message: str) -> None:
+ self.message = message
+
+ super().__init__(message)
+
+
+class SubprocessError(ApplicationError):
+ """An error from executing a subprocess."""
+
+ def __init__(self, result: SubprocessResult) -> None:
+ self.result = result
+
+ message = f'Command `{shlex.join(result.command)}` exited with status: {result.status}'
+
+ stdout = (result.stdout or '').strip()
+ stderr = (result.stderr or '').strip()
+
+ if stdout:
+ message += f'\n>>> Standard Output\n{stdout}'
+
+ if stderr:
+ message += f'\n>>> Standard Error\n{stderr}'
+
+ super().__init__(message)
+
+
+class ProgramNotFoundError(ApplicationError):
+ """A required program was not found."""
+
+ def __init__(self, name: str) -> None:
+ self.name = name
+
+ super().__init__(f'Missing program: {name}')
+
+
+class Display:
+ """Display interface for sending output to the console."""
+
+ CLEAR = '\033[0m'
+ RED = '\033[31m'
+ GREEN = '\033[32m'
+ YELLOW = '\033[33m'
+ BLUE = '\033[34m'
+ PURPLE = '\033[35m'
+ CYAN = '\033[36m'
+
+ def __init__(self) -> None:
+ self.sensitive: set[str] = set()
+
+ def section(self, message: str) -> None:
+ """Print a section message to the console."""
+ self.show(f'==> {message}', color=self.BLUE)
+
+ def subsection(self, message: str) -> None:
+ """Print a subsection message to the console."""
+ self.show(f'--> {message}', color=self.CYAN)
+
+ def fatal(self, message: str) -> None:
+ """Print a fatal message to the console."""
+ self.show(f'FATAL: {message}', color=self.RED)
+
+ def error(self, message: str) -> None:
+ """Print an error message to the console."""
+ self.show(f'ERROR: {message}', color=self.RED)
+
+ def warning(self, message: str) -> None:
+ """Print a warning message to the console."""
+ self.show(f'WARNING: {message}', color=self.PURPLE)
+
+ def info(self, message: str) -> None:
+ """Print an info message to the console."""
+ self.show(f'INFO: {message}', color=self.YELLOW)
+
+ def show(self, message: str, color: str | None = None) -> None:
+ """Print a message to the console."""
+ for item in self.sensitive:
+ message = message.replace(item, '*' * len(item))
+
+ print(f'{color or self.CLEAR}{message}{self.CLEAR}', flush=True)
+
+
+def run_module(
+ module: str,
+ args: dict[str, t.Any],
+) -> SubprocessResult:
+ """Run the specified Ansible module and return the result."""
+ return run_command('ansible', '-m', module, '-v', '-a', json.dumps(args), 'localhost')
+
+
+def retry_command(func: t.Callable[[], SubprocessResult], attempts: int = 3, retry_any_error: bool = False) -> SubprocessResult:
+ """Run the given command function up to the specified number of attempts when the failure is due to an SSH error."""
+ for attempts_remaining in range(attempts - 1, -1, -1):
+ try:
+ return func()
+ except SubprocessError as ex:
+ if ex.result.command[0] == 'ssh' and ex.result.status == 255 and attempts_remaining:
+ # SSH connections on our Ubuntu 22.04 host sometimes fail for unknown reasons.
+ # This retry should allow the test suite to continue, maintaining CI stability.
+ # TODO: Figure out why local SSH connections sometimes fail during the test run.
+ display.warning('Command failed due to an SSH error. Waiting a few seconds before retrying.')
+ time.sleep(3)
+ continue
+
+ if retry_any_error:
+ display.warning('Command failed. Waiting a few seconds before retrying.')
+ time.sleep(3)
+ continue
+
+ raise
+
+
+def run_command(
+ *command: str,
+ data: str | None = None,
+ stdin: int | t.IO[bytes] | None = None,
+ env: dict[str, str] | None = None,
+ capture: bool = False,
+) -> SubprocessResult:
+ """Run the specified command and return the result."""
+ stdin = subprocess.PIPE if data else stdin or subprocess.DEVNULL
+ stdout = subprocess.PIPE if capture else None
+ stderr = subprocess.PIPE if capture else None
+
+ display.subsection(f'Run command: {shlex.join(command)}')
+
+ try:
+ with subprocess.Popen(args=command, stdin=stdin, stdout=stdout, stderr=stderr, env=env, text=True) as process:
+ process_stdout, process_stderr = process.communicate(data)
+ process_status = process.returncode
+ except FileNotFoundError:
+ raise ProgramNotFoundError(command[0]) from None
+
+ result = SubprocessResult(
+ command=list(command),
+ stdout=process_stdout,
+ stderr=process_stderr,
+ status=process_status,
+ )
+
+ if process.returncode != 0:
+ raise SubprocessError(result)
+
+ return result
+
+
+class Bootstrapper(metaclass=abc.ABCMeta):
+ """Bootstrapper for remote instances."""
+
+ @classmethod
+ def install_podman(cls) -> bool:
+ """Return True if podman will be installed."""
+ return False
+
+ @classmethod
+ def install_docker(cls) -> bool:
+ """Return True if docker will be installed."""
+ return False
+
+ @classmethod
+ def usable(cls) -> bool:
+ """Return True if the bootstrapper can be used, otherwise False."""
+ return False
+
+ @classmethod
+ def init(cls) -> t.Type[Bootstrapper]:
+ """Return a bootstrapper type appropriate for the current system."""
+ for bootstrapper in cls.__subclasses__():
+ if bootstrapper.usable():
+ return bootstrapper
+
+ display.warning('No supported bootstrapper found.')
+ return Bootstrapper
+
+ @classmethod
+ def run(cls) -> None:
+ """Run the bootstrapper."""
+ cls.configure_root_user()
+ cls.configure_unprivileged_user()
+ cls.configure_source_trees()
+ cls.configure_ssh_keys()
+ cls.configure_podman_remote()
+
+ @classmethod
+ def configure_root_user(cls) -> None:
+ """Configure the root user to run tests."""
+ root_password_status = run_command('passwd', '--status', 'root', capture=True)
+ root_password_set = root_password_status.stdout.split()[1]
+
+ if root_password_set not in ('P', 'PS'):
+ root_password = run_command('openssl', 'passwd', '-5', '-stdin', data=secrets.token_hex(8), capture=True).stdout.strip()
+
+ run_module(
+ 'user',
+ dict(
+ user='root',
+ password=root_password,
+ ),
+ )
+
+ @classmethod
+ def configure_unprivileged_user(cls) -> None:
+ """Configure the unprivileged user to run tests."""
+ unprivileged_password = run_command('openssl', 'passwd', '-5', '-stdin', data=secrets.token_hex(8), capture=True).stdout.strip()
+
+ run_module(
+ 'user',
+ dict(
+ user=UNPRIVILEGED_USER_NAME,
+ password=unprivileged_password,
+ groups=['docker'] if cls.install_docker() else [],
+ append=True,
+ ),
+ )
+
+ if os_release.id == 'alpine':
+ # Most distros handle this automatically, but not Alpine.
+ # See: https://www.redhat.com/sysadmin/rootless-podman
+ start = 165535
+ end = start + 65535
+ id_range = f'{start}-{end}'
+
+ run_command(
+ 'usermod',
+ '--add-subuids',
+ id_range,
+ '--add-subgids',
+ id_range,
+ UNPRIVILEGED_USER_NAME,
+ )
+
+ @classmethod
+ def configure_source_trees(cls):
+ """Configure the source trees needed to run tests for both root and the unprivileged user."""
+ current_ansible = pathlib.Path(os.environ['PYTHONPATH']).parent
+
+ root_ansible = pathlib.Path('~').expanduser() / 'ansible'
+ test_ansible = pathlib.Path(f'~{UNPRIVILEGED_USER_NAME}').expanduser() / 'ansible'
+
+ if current_ansible != root_ansible:
+ display.info(f'copying {current_ansible} -> {root_ansible} ...')
+ rmtree(root_ansible)
+ shutil.copytree(current_ansible, root_ansible)
+ run_command('chown', '-R', 'root:root', str(root_ansible))
+
+ display.info(f'copying {current_ansible} -> {test_ansible} ...')
+ rmtree(test_ansible)
+ shutil.copytree(current_ansible, test_ansible)
+ run_command('chown', '-R', f'{UNPRIVILEGED_USER_NAME}:{UNPRIVILEGED_USER_NAME}', str(test_ansible))
+
+ paths = [pathlib.Path(test_ansible)]
+
+ for root, dir_names, file_names in os.walk(test_ansible):
+ paths.extend(pathlib.Path(root, dir_name) for dir_name in dir_names)
+ paths.extend(pathlib.Path(root, file_name) for file_name in file_names)
+
+ user = pwd.getpwnam(UNPRIVILEGED_USER_NAME)
+ uid = user.pw_uid
+ gid = user.pw_gid
+
+ for path in paths:
+ os.chown(path, uid, gid)
+
+ @classmethod
+ def configure_ssh_keys(cls) -> None:
+ """Configure SSH keys needed to run tests."""
+ user = pwd.getpwnam(UNPRIVILEGED_USER_NAME)
+ uid = user.pw_uid
+ gid = user.pw_gid
+
+ current_rsa_pub = pathlib.Path('~/.ssh/id_rsa.pub').expanduser()
+
+ test_authorized_keys = pathlib.Path(f'~{UNPRIVILEGED_USER_NAME}/.ssh/authorized_keys').expanduser()
+
+ test_authorized_keys.parent.mkdir(mode=0o755, parents=True, exist_ok=True)
+ os.chown(test_authorized_keys.parent, uid, gid)
+
+ shutil.copyfile(current_rsa_pub, test_authorized_keys)
+ os.chown(test_authorized_keys, uid, gid)
+ test_authorized_keys.chmod(mode=0o644)
+
+ @classmethod
+ def configure_podman_remote(cls) -> None:
+ """Configure podman remote support."""
+ # TODO: figure out how to support remote podman without systemd (Alpine)
+ # TODO: figure out how to support remote podman on Ubuntu
+ if os_release.id in ('alpine', 'ubuntu'):
+ return
+
+ # Support podman remote on any host with systemd available.
+ retry_command(lambda: run_command('ssh', f'{UNPRIVILEGED_USER_NAME}@localhost', 'systemctl', '--user', 'enable', '--now', 'podman.socket'))
+ run_command('loginctl', 'enable-linger', UNPRIVILEGED_USER_NAME)
+
+
+class DnfBootstrapper(Bootstrapper):
+ """Bootstrapper for dnf based systems."""
+
+ @classmethod
+ def install_podman(cls) -> bool:
+ """Return True if podman will be installed."""
+ return True
+
+ @classmethod
+ def install_docker(cls) -> bool:
+ """Return True if docker will be installed."""
+ return os_release.id != 'rhel'
+
+ @classmethod
+ def usable(cls) -> bool:
+ """Return True if the bootstrapper can be used, otherwise False."""
+ return bool(shutil.which('dnf'))
+
+ @classmethod
+ def run(cls) -> None:
+ """Run the bootstrapper."""
+ # NOTE: Install crun to make it available to podman, otherwise installing moby-engine can cause podman to use runc instead.
+ packages = ['podman', 'crun']
+
+ if cls.install_docker():
+ packages.append('moby-engine')
+
+ if os_release.id == 'fedora' and os_release.version_id == '36':
+            # In Fedora 36 the current version of netavark, 1.2.0, causes TCP connect to hang between rootful containers.
+ # The previously tested version, 1.1.0, did not have this issue.
+ # Unfortunately, with the release of 1.2.0 the 1.1.0 package was removed from the repositories.
+ # Thankfully the 1.0.2 version is available and also works, so we'll use that here until a fixed version is available.
+ # See: https://github.com/containers/netavark/issues/491
+ packages.append('netavark-1.0.2')
+
+ if os_release.id == 'rhel':
+ # As of the release of RHEL 9.1, installing podman on RHEL 9.0 results in a non-fatal error at install time:
+ #
+ # libsemanage.semanage_pipe_data: Child process /usr/libexec/selinux/hll/pp failed with code: 255. (No such file or directory).
+ # container: libsepol.policydb_read: policydb module version 21 does not match my version range 4-20
+ # container: libsepol.sepol_module_package_read: invalid module in module package (at section 0)
+ # container: Failed to read policy package
+ # libsemanage.semanage_direct_commit: Failed to compile hll files into cil files.
+ # (No such file or directory).
+ # /usr/sbin/semodule: Failed!
+ #
+ # Unfortunately this is then fatal when running podman, resulting in no error message and a 127 return code.
+ # The solution is to update the policycoreutils package *before* installing podman.
+ #
+ # NOTE: This work-around can probably be removed once we're testing on RHEL 9.1, as the updated packages should already be installed.
+ # Unfortunately at this time there is no RHEL 9.1 AMI available (other than the Beta release).
+
+ run_command('dnf', 'update', '-y', 'policycoreutils')
+
+ run_command('dnf', 'install', '-y', *packages)
+
+ if cls.install_docker():
+ run_command('systemctl', 'start', 'docker')
+
+ if os_release.id == 'rhel' and os_release.version_id.startswith('8.'):
+ # RHEL 8 defaults to using runc instead of crun.
+ # Unfortunately runc seems to have issues with podman remote.
+ # Specifically, it tends to cause conmon to burn CPU until it reaches the specified exit delay.
+ # So we'll just change the system default to crun instead.
+ # Unfortunately we can't do this with the `--runtime` option since that doesn't work with podman remote.
+
+ conf = pathlib.Path('/usr/share/containers/containers.conf').read_text()
+
+ conf = re.sub('^runtime .*', 'runtime = "crun"', conf, flags=re.MULTILINE)
+
+ pathlib.Path('/etc/containers/containers.conf').write_text(conf)
+
+ super().run()
+
+
+class AptBootstrapper(Bootstrapper):
+ """Bootstrapper for apt based systems."""
+
+ @classmethod
+ def install_podman(cls) -> bool:
+ """Return True if podman will be installed."""
+ return not (os_release.id == 'ubuntu' and os_release.version_id == '20.04')
+
+ @classmethod
+ def install_docker(cls) -> bool:
+ """Return True if docker will be installed."""
+ return True
+
+ @classmethod
+ def usable(cls) -> bool:
+ """Return True if the bootstrapper can be used, otherwise False."""
+ return bool(shutil.which('apt-get'))
+
+ @classmethod
+ def run(cls) -> None:
+ """Run the bootstrapper."""
+ apt_env = os.environ.copy()
+ apt_env.update(
+ DEBIAN_FRONTEND='noninteractive',
+ )
+
+ packages = ['docker.io']
+
+ if cls.install_podman():
+ # NOTE: Install crun to make it available to podman, otherwise installing docker.io can cause podman to use runc instead.
+ # Using podman rootless requires the `newuidmap` and `slirp4netns` commands.
+ packages.extend(('podman', 'crun', 'uidmap', 'slirp4netns'))
+
+ run_command('apt-get', 'install', *packages, '-y', '--no-install-recommends', env=apt_env)
+
+ super().run()
+
+
+class ApkBootstrapper(Bootstrapper):
+ """Bootstrapper for apk based systems."""
+
+ @classmethod
+ def install_podman(cls) -> bool:
+ """Return True if podman will be installed."""
+ return True
+
+ @classmethod
+ def install_docker(cls) -> bool:
+ """Return True if docker will be installed."""
+ return True
+
+ @classmethod
+ def usable(cls) -> bool:
+ """Return True if the bootstrapper can be used, otherwise False."""
+ return bool(shutil.which('apk'))
+
+ @classmethod
+ def run(cls) -> None:
+ """Run the bootstrapper."""
+ # The `openssl` package is used to generate hashed passwords.
+ packages = ['docker', 'podman', 'openssl']
+
+ run_command('apk', 'add', *packages)
+ run_command('service', 'docker', 'start')
+ run_command('modprobe', 'tun')
+
+ super().run()
+
+
+@dataclasses.dataclass(frozen=True)
+class OsRelease:
+ """Operating system identification."""
+
+ id: str
+ version_id: str
+
+ @staticmethod
+ def init() -> OsRelease:
+ """Detect the current OS release and return the result."""
+ lines = run_command('sh', '-c', '. /etc/os-release && echo $ID && echo $VERSION_ID', capture=True).stdout.splitlines()
+
+ result = OsRelease(
+ id=lines[0],
+ version_id=lines[1],
+ )
+
+ display.show(f'Detected OS "{result.id}" version "{result.version_id}".')
+
+ return result
+
+
+display = Display()
+os_release = OsRelease.init()
+
+ROOT_USER = User.get('root')
+
+if __name__ == '__main__':
+ main()
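
The `DockerInfo.cgroup_version` property above reconciles two payload shapes: podman nests a "v"-prefixed `cgroupVersion` string under `host`, while docker reports a bare `CgroupVersion` at the top level. A minimal standalone sketch of that normalization, using assumed sample payloads rather than output captured from a real host:

    import json

    # Assumed sample payloads illustrating the two `info` formats handled above.
    podman_info = json.loads('{"host": {"cgroupVersion": "v2"}}')  # podman: nested, "v"-prefixed
    docker_info = json.loads('{"CgroupVersion": "2"}')             # docker: top-level, bare number

    for data in (podman_info, docker_info):
        host = data.get('host')
        version = int(host['cgroupVersion'].lstrip('v')) if host else int(data['CgroupVersion'])
        print(version)  # prints 2 for both payloads
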
diff --git a/test/integration/targets/ansible-test-container/runme.sh b/test/integration/targets/ansible-test-container/runme.sh
new file mode 100755
index 00000000..56fd6690
--- /dev/null
+++ b/test/integration/targets/ansible-test-container/runme.sh
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+
+set -eu
+
+./runme.py
diff --git a/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/col/plugins/modules/invalid_yaml_syntax.py b/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/col/plugins/modules/invalid_yaml_syntax.py
new file mode 100644
index 00000000..5dd753f7
--- /dev/null
+++ b/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/col/plugins/modules/invalid_yaml_syntax.py
@@ -0,0 +1,27 @@
+#!/usr/bin/python
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+- key: "value"wrong
+'''
+
+EXAMPLES = '''
+- key: "value"wrong
+'''
+
+RETURN = '''
+- key: "value"wrong
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ AnsibleModule(argument_spec=dict())
+
+
+if __name__ == '__main__':
+ main()
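
Each of the three documentation strings above embeds `- key: "value"wrong`, which is invalid YAML because characters follow the closing quote of a quoted scalar; this is what drives the three syntax-error entries in expected.txt below. A quick sketch of the failure mode, assuming PyYAML (already a dependency of ansible-core):

    import yaml

    try:
        yaml.safe_load('- key: "value"wrong')
    except yaml.YAMLError as ex:
        # PyYAML rejects the trailing characters after the quoted scalar;
        # the exact exception subclass is an implementation detail of the parser.
        print(f'{type(ex).__name__}: {ex}')
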
diff --git a/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/failure/README.rst b/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/failure/README.rst
new file mode 100644
index 00000000..bf1003fa
--- /dev/null
+++ b/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/failure/README.rst
@@ -0,0 +1,3 @@
+README
+------
+This is a simple collection used to test failures with ``ansible-test sanity --test validate-modules``.
diff --git a/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/failure/galaxy.yml b/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/failure/galaxy.yml
new file mode 100644
index 00000000..3b116713
--- /dev/null
+++ b/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/failure/galaxy.yml
@@ -0,0 +1,6 @@
+namespace: ns
+name: failure
+version: 1.0.0
+readme: README.rst
+authors:
+ - Ansible
diff --git a/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/failure/meta/main.yml b/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/failure/meta/main.yml
new file mode 100644
index 00000000..1602a255
--- /dev/null
+++ b/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/failure/meta/main.yml
@@ -0,0 +1 @@
+requires_ansible: '>=2.9'
diff --git a/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/failure/plugins/modules/failure_ps.ps1 b/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/failure/plugins/modules/failure_ps.ps1
new file mode 100644
index 00000000..6ec04393
--- /dev/null
+++ b/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/failure/plugins/modules/failure_ps.ps1
@@ -0,0 +1,16 @@
+#!powershell
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#AnsibleRequires -CSharpUtil Ansible.Basic
+
+throw "test inner error message"
+
+$module = [Ansible.Basic.AnsibleModule]::Create($args, @{
+ options = @{
+ test = @{ type = 'str'; choices = @('foo', 'bar'); default = 'foo' }
+ }
+ })
+
+$module.Result.test = 'abc'
+
+$module.ExitJson()
diff --git a/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/failure/plugins/modules/failure_ps.yml b/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/failure/plugins/modules/failure_ps.yml
new file mode 100644
index 00000000..c657ec9b
--- /dev/null
+++ b/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/failure/plugins/modules/failure_ps.yml
@@ -0,0 +1,31 @@
+# Copyright (c) 2022 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION:
+ module: failure_ps
+ short_description: Short description for failure_ps module
+ description:
+ - Description for failure_ps module
+ options:
+ test:
+ description:
+ - Description for test module option
+ type: str
+ choices:
+ - foo
+ - bar
+ default: foo
+ author:
+ - Ansible Core Team
+
+EXAMPLES: |
+ - name: example for failure_ps
+    ns.failure.failure_ps:
+ test: bar
+
+RETURN:
+ test:
+ description: The test return value
+ returned: always
+ type: str
+ sample: abc
diff --git a/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/ps_only/plugins/module_utils/share_module.psm1 b/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/ps_only/plugins/module_utils/share_module.psm1
new file mode 100644
index 00000000..1e8ff905
--- /dev/null
+++ b/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/ps_only/plugins/module_utils/share_module.psm1
@@ -0,0 +1,19 @@
+#AnsibleRequires -CSharpUtil Ansible.Basic
+
+Function Invoke-AnsibleModule {
+ <#
+ .SYNOPSIS
+ validate
+ #>
+ [CmdletBinding()]
+ param ()
+
+ $module = [Ansible.Basic.AnsibleModule]::Create(@(), @{
+ options = @{
+ test = @{ type = 'str' }
+ }
+ })
+ $module.ExitJson()
+}
+
+Export-ModuleMember -Function Invoke-AnsibleModule
diff --git a/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/ps_only/plugins/modules/in_function.ps1 b/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/ps_only/plugins/modules/in_function.ps1
new file mode 100644
index 00000000..8f74edcc
--- /dev/null
+++ b/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/ps_only/plugins/modules/in_function.ps1
@@ -0,0 +1,7 @@
+#!powershell
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#AnsibleRequires -CSharpUtil Ansible.Basic
+#AnsibleRequires -PowerShell ..module_utils.share_module
+
+Invoke-AnsibleModule
diff --git a/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/ps_only/plugins/modules/in_function.yml b/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/ps_only/plugins/modules/in_function.yml
new file mode 100644
index 00000000..87d3ec77
--- /dev/null
+++ b/test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/ps_only/plugins/modules/in_function.yml
@@ -0,0 +1,25 @@
+# Copyright (c) 2022 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION:
+ module: in_function
+ short_description: Short description for in_function module
+ description:
+ - Description for in_function module
+ options:
+ test:
+ description: Description for test
+ type: str
+ author:
+ - Ansible Core Team
+
+EXAMPLES: |
+ - name: example for sidecar
+    ns.ps_only.in_function:
+
+RETURN:
+ test:
+ description: The test return value
+ returned: always
+ type: str
+ sample: abc
diff --git a/test/integration/targets/ansible-test-sanity-validate-modules/expected.txt b/test/integration/targets/ansible-test-sanity-validate-modules/expected.txt
new file mode 100644
index 00000000..95f12f39
--- /dev/null
+++ b/test/integration/targets/ansible-test-sanity-validate-modules/expected.txt
@@ -0,0 +1,5 @@
+plugins/modules/invalid_yaml_syntax.py:0:0: deprecation-mismatch: "meta/runtime.yml" and DOCUMENTATION.deprecation do not agree.
+plugins/modules/invalid_yaml_syntax.py:0:0: missing-documentation: No DOCUMENTATION provided
+plugins/modules/invalid_yaml_syntax.py:8:15: documentation-syntax-error: DOCUMENTATION is not valid YAML
+plugins/modules/invalid_yaml_syntax.py:12:15: invalid-examples: EXAMPLES is not valid YAML
+plugins/modules/invalid_yaml_syntax.py:16:15: return-syntax-error: RETURN is not valid YAML
diff --git a/test/integration/targets/ansible-test-sanity-validate-modules/runme.sh b/test/integration/targets/ansible-test-sanity-validate-modules/runme.sh
index 1b051b3a..e0299969 100755
--- a/test/integration/targets/ansible-test-sanity-validate-modules/runme.sh
+++ b/test/integration/targets/ansible-test-sanity-validate-modules/runme.sh
@@ -4,7 +4,9 @@ source ../collection/setup.sh
set -eux
-ansible-test sanity --test validate-modules --color --truncate 0 "${@}"
+ansible-test sanity --test validate-modules --color --truncate 0 --failure-ok --lint "${@}" 1> actual-stdout.txt 2> actual-stderr.txt
+diff -u "${TEST_DIR}/expected.txt" actual-stdout.txt
+grep -f "${TEST_DIR}/expected.txt" actual-stderr.txt
cd ../ps_only
@@ -15,3 +17,18 @@ fi
# Use a PowerShell-only collection to verify that validate-modules does not load the collection loader multiple times.
ansible-test sanity --test validate-modules --color --truncate 0 "${@}"
+
+cd ../failure
+
+if ansible-test sanity --test validate-modules --color --truncate 0 "${@}" 1> ansible-stdout.txt 2> ansible-stderr.txt; then
+ echo "ansible-test sanity for failure should cause failure"
+ exit 1
+fi
+
+cat ansible-stdout.txt
+grep -q "ERROR: plugins/modules/failure_ps.ps1:0:0: import-error: Exception attempting to import module for argument_spec introspection" < ansible-stdout.txt
+grep -q "test inner error message" < ansible-stdout.txt
+
+cat ansible-stderr.txt
+grep -q "FATAL: The 1 sanity test(s) listed below (out of 1) failed" < ansible-stderr.txt
+grep -q "validate-modules" < ansible-stderr.txt
diff --git a/test/integration/targets/argspec/library/argspec.py b/test/integration/targets/argspec/library/argspec.py
index 1a1d288d..b6d6d110 100644
--- a/test/integration/targets/argspec/library/argspec.py
+++ b/test/integration/targets/argspec/library/argspec.py
@@ -34,7 +34,7 @@ def main():
'elements': 'dict',
'options': {
'thing': {},
- 'other': {},
+ 'other': {'aliases': ['other_alias']},
},
},
'required_by': {
@@ -136,9 +136,111 @@ def main():
'bar': {
'type': 'str',
'default': 'baz',
+ 'aliases': ['bar_alias1', 'bar_alias2'],
+ },
+ },
+ },
+ 'deprecation_aliases': {
+ 'type': 'str',
+ 'aliases': [
+ 'deprecation_aliases_version',
+ 'deprecation_aliases_date',
+ ],
+ 'deprecated_aliases': [
+ {
+ 'name': 'deprecation_aliases_version',
+ 'version': '2.0.0',
+ 'collection_name': 'foo.bar',
+ },
+ {
+ 'name': 'deprecation_aliases_date',
+ 'date': '2023-01-01',
+ 'collection_name': 'foo.bar',
+ },
+ ],
+ },
+ 'deprecation_param_version': {
+ 'type': 'str',
+ 'removed_in_version': '2.0.0',
+ 'removed_from_collection': 'foo.bar',
+ },
+ 'deprecation_param_date': {
+ 'type': 'str',
+ 'removed_at_date': '2023-01-01',
+ 'removed_from_collection': 'foo.bar',
+ },
+ 'subdeprecation': {
+ 'aliases': [
+ 'subdeprecation_alias',
+ ],
+ 'type': 'dict',
+ 'options': {
+ 'deprecation_aliases': {
+ 'type': 'str',
+ 'aliases': [
+ 'deprecation_aliases_version',
+ 'deprecation_aliases_date',
+ ],
+ 'deprecated_aliases': [
+ {
+ 'name': 'deprecation_aliases_version',
+ 'version': '2.0.0',
+ 'collection_name': 'foo.bar',
+ },
+ {
+ 'name': 'deprecation_aliases_date',
+ 'date': '2023-01-01',
+ 'collection_name': 'foo.bar',
+ },
+ ],
+ },
+ 'deprecation_param_version': {
+ 'type': 'str',
+ 'removed_in_version': '2.0.0',
+ 'removed_from_collection': 'foo.bar',
+ },
+ 'deprecation_param_date': {
+ 'type': 'str',
+ 'removed_at_date': '2023-01-01',
+ 'removed_from_collection': 'foo.bar',
},
},
},
+ 'subdeprecation_list': {
+ 'type': 'list',
+ 'elements': 'dict',
+ 'options': {
+ 'deprecation_aliases': {
+ 'type': 'str',
+ 'aliases': [
+ 'deprecation_aliases_version',
+ 'deprecation_aliases_date',
+ ],
+ 'deprecated_aliases': [
+ {
+ 'name': 'deprecation_aliases_version',
+ 'version': '2.0.0',
+ 'collection_name': 'foo.bar',
+ },
+ {
+ 'name': 'deprecation_aliases_date',
+ 'date': '2023-01-01',
+ 'collection_name': 'foo.bar',
+ },
+ ],
+ },
+ 'deprecation_param_version': {
+ 'type': 'str',
+ 'removed_in_version': '2.0.0',
+ 'removed_from_collection': 'foo.bar',
+ },
+ 'deprecation_param_date': {
+ 'type': 'str',
+ 'removed_at_date': '2023-01-01',
+ 'removed_from_collection': 'foo.bar',
+ },
+ },
+ }
},
required_if=(
('state', 'present', ('path', 'content'), True),
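
The `deprecated_aliases`, `removed_in_version`/`removed_at_date`, and `removed_from_collection` entries added above drive the deprecation warnings asserted in tasks/main.yml below. A condensed, hypothetical standalone module (same spec shape, trimmed to a single option) showing how such a spec is wired into `AnsibleModule`:

    #!/usr/bin/python
    from __future__ import absolute_import, division, print_function
    __metaclass__ = type

    from ansible.module_utils.basic import AnsibleModule


    def main():
        # Invoking this with `deprecation_aliases_version: value` records one
        # deprecation entry carrying version '2.0.0' and collection_name 'foo.bar'.
        module = AnsibleModule(
            argument_spec={
                'deprecation_aliases': {
                    'type': 'str',
                    'aliases': ['deprecation_aliases_version'],
                    'deprecated_aliases': [
                        {
                            'name': 'deprecation_aliases_version',
                            'version': '2.0.0',
                            'collection_name': 'foo.bar',
                        },
                    ],
                },
            },
        )
        module.exit_json(params=module.params)


    if __name__ == '__main__':
        main()
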
diff --git a/test/integration/targets/argspec/tasks/main.yml b/test/integration/targets/argspec/tasks/main.yml
index 283c922d..6e8ec054 100644
--- a/test/integration/targets/argspec/tasks/main.yml
+++ b/test/integration/targets/argspec/tasks/main.yml
@@ -366,6 +366,130 @@
foo: bar
register: argspec_apply_defaults_one
+- argspec:
+ required: value
+ required_one_of_one: value
+ deprecation_aliases_version: value
+ register: deprecation_alias_version
+
+- argspec:
+ required: value
+ required_one_of_one: value
+ deprecation_aliases_date: value
+ register: deprecation_alias_date
+
+- argspec:
+ required: value
+ required_one_of_one: value
+ deprecation_param_version: value
+ register: deprecation_param_version
+
+- argspec:
+ required: value
+ required_one_of_one: value
+ deprecation_param_date: value
+ register: deprecation_param_date
+
+- argspec:
+ required: value
+ required_one_of_one: value
+ subdeprecation:
+ deprecation_aliases_version: value
+ register: sub_deprecation_alias_version
+
+- argspec:
+ required: value
+ required_one_of_one: value
+ subdeprecation:
+ deprecation_aliases_date: value
+ register: sub_deprecation_alias_date
+
+- argspec:
+ required: value
+ required_one_of_one: value
+ subdeprecation:
+ deprecation_param_version: value
+ register: sub_deprecation_param_version
+
+- argspec:
+ required: value
+ required_one_of_one: value
+ subdeprecation:
+ deprecation_param_date: value
+ register: sub_deprecation_param_date
+
+- argspec:
+ required: value
+ required_one_of_one: value
+ subdeprecation_alias:
+ deprecation_aliases_version: value
+ register: subalias_deprecation_alias_version
+
+- argspec:
+ required: value
+ required_one_of_one: value
+ subdeprecation_alias:
+ deprecation_aliases_date: value
+ register: subalias_deprecation_alias_date
+
+- argspec:
+ required: value
+ required_one_of_one: value
+ subdeprecation_alias:
+ deprecation_param_version: value
+ register: subalias_deprecation_param_version
+
+- argspec:
+ required: value
+ required_one_of_one: value
+ subdeprecation_alias:
+ deprecation_param_date: value
+ register: subalias_deprecation_param_date
+
+- argspec:
+ required: value
+ required_one_of_one: value
+ subdeprecation_list:
+ - deprecation_aliases_version: value
+ register: sublist_deprecation_alias_version
+
+- argspec:
+ required: value
+ required_one_of_one: value
+ subdeprecation_list:
+ - deprecation_aliases_date: value
+ register: sublist_deprecation_alias_date
+
+- argspec:
+ required: value
+ required_one_of_one: value
+ subdeprecation_list:
+ - deprecation_param_version: value
+ register: sublist_deprecation_param_version
+
+- argspec:
+ required: value
+ required_one_of_one: value
+ subdeprecation_list:
+ - deprecation_param_date: value
+ register: sublist_deprecation_param_date
+
+- argspec:
+ required: value
+ required_one_of_one: value
+ apply_defaults:
+ bar_alias1: foo
+ bar_alias2: baz
+ register: alias_warning_dict
+
+- argspec:
+ required: value
+ required_one_of_one: value
+ required_one_of:
+ - other: foo
+ other_alias: bar
+ register: alias_warning_listdict
+
- assert:
that:
- argspec_required_fail is failed
@@ -446,3 +570,90 @@
- "argspec_apply_defaults_none.apply_defaults == {'foo': none, 'bar': 'baz'}"
- "argspec_apply_defaults_empty.apply_defaults == {'foo': none, 'bar': 'baz'}"
- "argspec_apply_defaults_one.apply_defaults == {'foo': 'bar', 'bar': 'baz'}"
+
+ - deprecation_alias_version.deprecations | length == 1
+ - deprecation_alias_version.deprecations[0].msg == "Alias 'deprecation_aliases_version' is deprecated. See the module docs for more information"
+ - deprecation_alias_version.deprecations[0].collection_name == 'foo.bar'
+ - deprecation_alias_version.deprecations[0].version == '2.0.0'
+ - "'date' not in deprecation_alias_version.deprecations[0]"
+ - deprecation_alias_date.deprecations | length == 1
+ - deprecation_alias_date.deprecations[0].msg == "Alias 'deprecation_aliases_date' is deprecated. See the module docs for more information"
+ - deprecation_alias_date.deprecations[0].collection_name == 'foo.bar'
+ - deprecation_alias_date.deprecations[0].date == '2023-01-01'
+ - "'version' not in deprecation_alias_date.deprecations[0]"
+ - deprecation_param_version.deprecations | length == 1
+ - deprecation_param_version.deprecations[0].msg == "Param 'deprecation_param_version' is deprecated. See the module docs for more information"
+ - deprecation_param_version.deprecations[0].collection_name == 'foo.bar'
+ - deprecation_param_version.deprecations[0].version == '2.0.0'
+ - "'date' not in deprecation_param_version.deprecations[0]"
+ - deprecation_param_date.deprecations | length == 1
+ - deprecation_param_date.deprecations[0].msg == "Param 'deprecation_param_date' is deprecated. See the module docs for more information"
+ - deprecation_param_date.deprecations[0].collection_name == 'foo.bar'
+ - deprecation_param_date.deprecations[0].date == '2023-01-01'
+ - "'version' not in deprecation_param_date.deprecations[0]"
+
+ - sub_deprecation_alias_version.deprecations | length == 1
+ - sub_deprecation_alias_version.deprecations[0].msg == "Alias 'subdeprecation.deprecation_aliases_version' is deprecated. See the module docs for more information"
+ - sub_deprecation_alias_version.deprecations[0].collection_name == 'foo.bar'
+ - sub_deprecation_alias_version.deprecations[0].version == '2.0.0'
+ - "'date' not in sub_deprecation_alias_version.deprecations[0]"
+ - sub_deprecation_alias_date.deprecations | length == 1
+ - sub_deprecation_alias_date.deprecations[0].msg == "Alias 'subdeprecation.deprecation_aliases_date' is deprecated. See the module docs for more information"
+ - sub_deprecation_alias_date.deprecations[0].collection_name == 'foo.bar'
+ - sub_deprecation_alias_date.deprecations[0].date == '2023-01-01'
+ - "'version' not in sub_deprecation_alias_date.deprecations[0]"
+ - sub_deprecation_param_version.deprecations | length == 1
+ - sub_deprecation_param_version.deprecations[0].msg == "Param 'subdeprecation[\"deprecation_param_version\"]' is deprecated. See the module docs for more information"
+ - sub_deprecation_param_version.deprecations[0].collection_name == 'foo.bar'
+ - sub_deprecation_param_version.deprecations[0].version == '2.0.0'
+ - "'date' not in sub_deprecation_param_version.deprecations[0]"
+ - sub_deprecation_param_date.deprecations | length == 1
+ - sub_deprecation_param_date.deprecations[0].msg == "Param 'subdeprecation[\"deprecation_param_date\"]' is deprecated. See the module docs for more information"
+ - sub_deprecation_param_date.deprecations[0].collection_name == 'foo.bar'
+ - sub_deprecation_param_date.deprecations[0].date == '2023-01-01'
+ - "'version' not in sub_deprecation_param_date.deprecations[0]"
+
+ - subalias_deprecation_alias_version.deprecations | length == 1
+ - subalias_deprecation_alias_version.deprecations[0].msg == "Alias 'subdeprecation.deprecation_aliases_version' is deprecated. See the module docs for more information"
+ - subalias_deprecation_alias_version.deprecations[0].collection_name == 'foo.bar'
+ - subalias_deprecation_alias_version.deprecations[0].version == '2.0.0'
+ - "'date' not in subalias_deprecation_alias_version.deprecations[0]"
+ - subalias_deprecation_alias_date.deprecations | length == 1
+ - subalias_deprecation_alias_date.deprecations[0].msg == "Alias 'subdeprecation.deprecation_aliases_date' is deprecated. See the module docs for more information"
+ - subalias_deprecation_alias_date.deprecations[0].collection_name == 'foo.bar'
+ - subalias_deprecation_alias_date.deprecations[0].date == '2023-01-01'
+ - "'version' not in subalias_deprecation_alias_date.deprecations[0]"
+ - subalias_deprecation_param_version.deprecations | length == 1
+ - subalias_deprecation_param_version.deprecations[0].msg == "Param 'subdeprecation[\"deprecation_param_version\"]' is deprecated. See the module docs for more information"
+ - subalias_deprecation_param_version.deprecations[0].collection_name == 'foo.bar'
+ - subalias_deprecation_param_version.deprecations[0].version == '2.0.0'
+ - "'date' not in subalias_deprecation_param_version.deprecations[0]"
+ - subalias_deprecation_param_date.deprecations | length == 1
+ - subalias_deprecation_param_date.deprecations[0].msg == "Param 'subdeprecation[\"deprecation_param_date\"]' is deprecated. See the module docs for more information"
+ - subalias_deprecation_param_date.deprecations[0].collection_name == 'foo.bar'
+ - subalias_deprecation_param_date.deprecations[0].date == '2023-01-01'
+ - "'version' not in subalias_deprecation_param_date.deprecations[0]"
+
+ - sublist_deprecation_alias_version.deprecations | length == 1
+ - sublist_deprecation_alias_version.deprecations[0].msg == "Alias 'subdeprecation_list[0].deprecation_aliases_version' is deprecated. See the module docs for more information"
+ - sublist_deprecation_alias_version.deprecations[0].collection_name == 'foo.bar'
+ - sublist_deprecation_alias_version.deprecations[0].version == '2.0.0'
+ - "'date' not in sublist_deprecation_alias_version.deprecations[0]"
+ - sublist_deprecation_alias_date.deprecations | length == 1
+ - sublist_deprecation_alias_date.deprecations[0].msg == "Alias 'subdeprecation_list[0].deprecation_aliases_date' is deprecated. See the module docs for more information"
+ - sublist_deprecation_alias_date.deprecations[0].collection_name == 'foo.bar'
+ - sublist_deprecation_alias_date.deprecations[0].date == '2023-01-01'
+ - "'version' not in sublist_deprecation_alias_date.deprecations[0]"
+ - sublist_deprecation_param_version.deprecations | length == 1
+ - sublist_deprecation_param_version.deprecations[0].msg == "Param 'subdeprecation_list[\"deprecation_param_version\"]' is deprecated. See the module docs for more information"
+ - sublist_deprecation_param_version.deprecations[0].collection_name == 'foo.bar'
+ - sublist_deprecation_param_version.deprecations[0].version == '2.0.0'
+ - "'date' not in sublist_deprecation_param_version.deprecations[0]"
+ - sublist_deprecation_param_date.deprecations | length == 1
+ - sublist_deprecation_param_date.deprecations[0].msg == "Param 'subdeprecation_list[\"deprecation_param_date\"]' is deprecated. See the module docs for more information"
+ - sublist_deprecation_param_date.deprecations[0].collection_name == 'foo.bar'
+ - sublist_deprecation_param_date.deprecations[0].date == '2023-01-01'
+ - "'version' not in sublist_deprecation_param_date.deprecations[0]"
+
+ - "'Both option apply_defaults.bar and its alias apply_defaults.bar_alias2 are set.' in alias_warning_dict.warnings"
+ - "'Both option required_one_of[0].other and its alias required_one_of[0].other_alias are set.' in alias_warning_listdict.warnings"
diff --git a/test/integration/targets/blocks/79711.yml b/test/integration/targets/blocks/79711.yml
new file mode 100644
index 00000000..ca9bfbb4
--- /dev/null
+++ b/test/integration/targets/blocks/79711.yml
@@ -0,0 +1,17 @@
+- hosts: localhost
+ gather_facts: false
+ tasks:
+ - block:
+ - block:
+ - debug:
+ - name: EXPECTED FAILURE
+ fail:
+ rescue:
+ - debug:
+ - debug:
+ - name: EXPECTED FAILURE
+ fail:
+ always:
+ - debug:
+ always:
+ - debug:
diff --git a/test/integration/targets/blocks/runme.sh b/test/integration/targets/blocks/runme.sh
index 06e3ddee..820107bb 100755
--- a/test/integration/targets/blocks/runme.sh
+++ b/test/integration/targets/blocks/runme.sh
@@ -127,3 +127,12 @@ rm -f 78612.out
ansible-playbook -vv 43191.yml
ansible-playbook -vv 43191-2.yml
+
+# https://github.com/ansible/ansible/issues/79711
+set +e
+ANSIBLE_FORCE_HANDLERS=0 ansible-playbook -vv 79711.yml | tee 79711.out
+set -e
+[ "$(grep -c 'ok=5' 79711.out)" -eq 1 ]
+[ "$(grep -c 'failed=1' 79711.out)" -eq 1 ]
+[ "$(grep -c 'rescued=1' 79711.out)" -eq 1 ]
+rm -f 79711.out
diff --git a/test/integration/targets/file/tasks/main.yml b/test/integration/targets/file/tasks/main.yml
index 3aed4917..17b0fae6 100644
--- a/test/integration/targets/file/tasks/main.yml
+++ b/test/integration/targets/file/tasks/main.yml
@@ -711,6 +711,82 @@
- group_exists.warnings is not defined
- group_gid_exists.warnings is not defined
+# ensure touching a file returns changed when needed
+# issue: https://github.com/ansible/ansible/issues/79360
+- name: touch a file returns changed in check mode if file does not exist
+ file:
+ path: '/tmp/touch_check_mode_test'
+ state: touch
+ check_mode: yes
+ register: touch_result_in_check_mode_not_existing
+
+- name: touch the file
+ file:
+ path: '/tmp/touch_check_mode_test'
+ mode: "0660"
+ state: touch
+
+- name: touch an existing file returns changed in check mode
+ file:
+ path: '/tmp/touch_check_mode_test'
+ state: touch
+ check_mode: yes
+ register: touch_result_in_check_mode_change_all_attr
+
+- name: touch an existing file returns changed in check mode when preserving access time
+ file:
+ path: '/tmp/touch_check_mode_test'
+ state: touch
+ access_time: "preserve"
+ check_mode: yes
+ register: touch_result_in_check_mode_preserve_access_time
+
+- name: touch an existing file returns changed in check mode when only mode changes
+ file:
+ path: '/tmp/touch_check_mode_test'
+ state: touch
+ access_time: "preserve"
+ modification_time: "preserve"
+ mode: "0640"
+ check_mode: yes
+ register: touch_result_in_check_mode_change_only_mode
+
+- name: touch an existing file returns ok if all attributes are preserved
+ file:
+ path: '/tmp/touch_check_mode_test'
+ state: touch
+ access_time: "preserve"
+ modification_time: "preserve"
+ check_mode: yes
+ register: touch_result_in_check_mode_all_attrs_preserved
+
+- name: touch an existing file fails in check mode when user does not exist
+ file:
+ path: '/tmp/touch_check_mode_test'
+ state: touch
+ owner: not-existing-user
+ check_mode: yes
+ ignore_errors: true
+ register: touch_result_in_check_mode_fails_not_existing_user
+
+- name: touch an existing file fails in check mode when group does not exist
+ file:
+ path: '/tmp/touch_check_mode_test'
+ state: touch
+ group: not-existing-group
+ check_mode: yes
+ ignore_errors: true
+ register: touch_result_in_check_mode_fails_not_existing_group
+
+- assert:
+ that:
+ - touch_result_in_check_mode_not_existing.changed
+ - touch_result_in_check_mode_preserve_access_time.changed
+ - touch_result_in_check_mode_change_only_mode.changed
+ - not touch_result_in_check_mode_all_attrs_preserved.changed
+ - touch_result_in_check_mode_fails_not_existing_user.warnings[0] is search('failed to look up user')
+ - touch_result_in_check_mode_fails_not_existing_group.warnings[0] is search('failed to look up group')
+
# https://github.com/ansible/ansible/issues/50943
# Need to use /tmp as nobody can't access remote_tmp_dir_test at all
- name: create file as root with all write permissions
diff --git a/test/integration/targets/get_url/tasks/ciphers.yml b/test/integration/targets/get_url/tasks/ciphers.yml
index b8ebd981..c7d9979d 100644
--- a/test/integration/targets/get_url/tasks/ciphers.yml
+++ b/test/integration/targets/get_url/tasks/ciphers.yml
@@ -6,7 +6,7 @@
register: good_ciphers
- name: test bad cipher
- uri:
+ get_url:
url: https://{{ httpbin_host }}/get
ciphers: ECDHE-ECDSA-AES128-SHA
dest: '{{ remote_tmp_dir }}/bad_cipher_get.json'
diff --git a/test/integration/targets/inventory_script/inventory.json b/test/integration/targets/inventory_script/inventory.json
index 5046a9a8..69ba5476 100644
--- a/test/integration/targets/inventory_script/inventory.json
+++ b/test/integration/targets/inventory_script/inventory.json
@@ -1029,9 +1029,9 @@
},
"all": {
"children": [
+ "ungrouped",
"None",
- "guests",
- "ungrouped"
+ "guests"
]
},
"guests": {
diff --git a/test/integration/targets/reboot/aliases b/test/integration/targets/reboot/aliases
index e9bebbf3..7f995fd6 100644
--- a/test/integration/targets/reboot/aliases
+++ b/test/integration/targets/reboot/aliases
@@ -1,2 +1,5 @@
-# No current way to split controller and test node
-unsupported
+context/target
+destructive
+needs/root
+shippable/posix/group2
+skip/docker
diff --git a/test/integration/targets/reboot/tasks/main.yml b/test/integration/targets/reboot/tasks/main.yml
index 7687cb73..4884f104 100644
--- a/test/integration/targets/reboot/tasks/main.yml
+++ b/test/integration/targets/reboot/tasks/main.yml
@@ -1,39 +1,41 @@
-- name: Test reboot
- when: ansible_facts.virtualization_type | default('') not in ['docker', 'container', 'containerd']
- block:
- # This block can be removed once we have a mechanism in ansible-test to separate
- # the control node from the managed node.
- - block:
- - name: Write temp file for sanity checking this is not the controller
- copy:
- content: 'I am the control node'
- dest: /tmp/Anything-Nutlike-Nuzzle-Plow-Overdue
- delegate_to: localhost
- connection: local
- when: inventory_hostname == ansible_play_hosts[0]
+- name: Check split state
+ stat:
+ path: "{{ output_dir }}"
+ register: split
+ ignore_errors: yes
+
+- name: >-
+ Memorize whether we're in a containerized environment
+ and/or a split controller mode
+ set_fact:
+ in_container_env: >-
+ {{
+ ansible_facts.virtualization_type | default('')
+ in ['docker', 'container', 'containerd']
+ }}
+ in_split_controller_mode: >-
+ {{ split is not success or not split.stat.exists }}
- - name: See if the temp file exists on the managed node
- stat:
- path: /tmp/Anything-Nutlike-Nuzzle-Plow-Overdue
- register: controller_temp_file
+- name: Explain why testing against a container is not an option
+ debug:
+ msg: >-
+ This test is attempting to reboot the whole host operating system.
+ The current target is a containerized environment. Containers
+      cannot be rebooted like VMs. This is why the test is being skipped.
+ when: in_container_env
- - name: EXPECT FAILURE | Check if the managed node is the control node
- assert:
- msg: >
- This test must be run manually by modifying the inventory file to point
- "{{ inventory_hostname }}" at a remote host rather than "{{ ansible_host }}".
- Skipping reboot test.
- that:
- - not controller_temp_file.stat.exists
- always:
- - name: Cleanup temp file
- file:
- path: /tmp/Anything-Nutlike-Nuzzle-Plow-Overdue
- state: absent
- delegate_to: localhost
- connection: local
- when: inventory_hostname == ansible_play_hosts[0]
+- name: Explain why testing against the same host is not an option
+ debug:
+ msg: >-
+ This test is attempting to reboot the whole host operating system.
+      This means it would interrupt itself trying to reboot its own
+      environment. It needs to target a separate VM or machine to be
+      able to function, so it's being skipped in the current invocation.
+ when: not in_split_controller_mode
+- name: Test reboot
+ when: not in_container_env and in_split_controller_mode
+ block:
- import_tasks: test_standard_scenarios.yml
- import_tasks: test_reboot_command.yml
- import_tasks: test_invalid_parameter.yml
diff --git a/test/integration/targets/roles_arg_spec/test_complex_role_fails.yml b/test/integration/targets/roles_arg_spec/test_complex_role_fails.yml
index 8764d382..81abdaa8 100644
--- a/test/integration/targets/roles_arg_spec/test_complex_role_fails.yml
+++ b/test/integration/targets/roles_arg_spec/test_complex_role_fails.yml
@@ -168,3 +168,30 @@
- ansible_failed_result.validate_args_context.name == "test1"
- ansible_failed_result.validate_args_context.type == "role"
- "ansible_failed_result.validate_args_context.path is search('roles_arg_spec/roles/test1')"
+
+ - name: test message for missing required parameters and invalid suboptions
+ block:
+ - include_role:
+ name: test1
+ vars:
+ some_json: '{}'
+ some_jsonarg: '{}'
+ multi_level_option:
+ second_level:
+ not_a_supported_suboption: true
+
+ - fail:
+ msg: "Should not get here"
+
+ rescue:
+ - debug:
+ var: ansible_failed_result
+
+ - assert:
+ that:
+ - ansible_failed_result.argument_errors | length == 2
+ - missing_required in ansible_failed_result.argument_errors
+ - got_unexpected in ansible_failed_result.argument_errors
+ vars:
+ missing_required: "missing required arguments: third_level found in multi_level_option -> second_level"
+ got_unexpected: "multi_level_option.second_level.not_a_supported_suboption. Supported parameters include: third_level."
diff --git a/test/integration/targets/setup_epel/tasks/main.yml b/test/integration/targets/setup_epel/tasks/main.yml
index ba0eae30..a8593bb4 100644
--- a/test/integration/targets/setup_epel/tasks/main.yml
+++ b/test/integration/targets/setup_epel/tasks/main.yml
@@ -1,3 +1,8 @@
+- name: Enable RHEL7 extras
+ # EPEL 7 depends on RHEL 7 extras, which is not enabled by default on RHEL.
+ # See: https://docs.fedoraproject.org/en-US/epel/epel-policy/#_policy
+ command: yum-config-manager --enable rhel-7-server-rhui-extras-rpms
+ when: ansible_facts.distribution == 'RedHat' and ansible_facts.distribution_major_version == '7'
- name: Install EPEL
yum:
name: https://ci-files.testing.ansible.com/test/integration/targets/setup_epel/epel-release-latest-{{ ansible_distribution_major_version }}.noarch.rpm
diff --git a/test/integration/targets/var_templating/ansible_debug_template.j2 b/test/integration/targets/var_templating/ansible_debug_template.j2
new file mode 100644
index 00000000..8fe25f99
--- /dev/null
+++ b/test/integration/targets/var_templating/ansible_debug_template.j2
@@ -0,0 +1 @@
+{{ hello }}
diff --git a/test/integration/targets/var_templating/runme.sh b/test/integration/targets/var_templating/runme.sh
index 9363cb3a..bcf09241 100755
--- a/test/integration/targets/var_templating/runme.sh
+++ b/test/integration/targets/var_templating/runme.sh
@@ -16,3 +16,6 @@ ansible-playbook task_vars_templating.yml -v "$@"
# there should be an attempt to use 'sudo' in the connection debug output
ANSIBLE_BECOME_ALLOW_SAME_USER=true ansible-playbook test_connection_vars.yml -vvvv "$@" | tee /dev/stderr | grep 'sudo \-H \-S'
+
+# smoke test usage of VarsWithSources that is used when ANSIBLE_DEBUG=1
+ANSIBLE_DEBUG=1 ansible-playbook test_vars_with_sources.yml -v "$@"
diff --git a/test/integration/targets/var_templating/test_vars_with_sources.yml b/test/integration/targets/var_templating/test_vars_with_sources.yml
new file mode 100644
index 00000000..0b8c990e
--- /dev/null
+++ b/test/integration/targets/var_templating/test_vars_with_sources.yml
@@ -0,0 +1,9 @@
+- hosts: localhost
+ gather_facts: false
+ tasks:
+ - template:
+ src: ansible_debug_template.j2
+ dest: "{{ output_dir }}/ansible_debug_templated.txt"
+ vars:
+ output_dir: "{{ lookup('env', 'OUTPUT_DIR') }}"
+ hello: hello
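For readers unfamiliar with the code path being smoke-tested: when ANSIBLE_DEBUG=1, the variable manager wraps its variables in a VarsWithSources object so each lookup can be traced back to where the variable was defined. A loose conceptual sketch of that wrapping idea follows; this is not ansible's actual implementation.

# Conceptual sketch only, not ansible's real VarsWithSources class.
class VarsWithSourcesSketch(dict):
    def __init__(self, data, sources):
        super().__init__(data)
        # Maps variable name to its origin (vars file, inventory, ...).
        self.sources = sources

    def __getitem__(self, key):
        print(f'variable {key!r} came from {self.sources.get(key, "unknown")}')
        return super().__getitem__(key)


play_vars = VarsWithSourcesSketch({'hello': 'hello'}, {'hello': 'play vars'})
print(play_vars['hello'])  # also reports the variable's source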
diff --git a/test/integration/targets/yum/tasks/yuminstallroot.yml b/test/integration/targets/yum/tasks/yuminstallroot.yml
index bb69151a..028e8059 100644
--- a/test/integration/targets/yum/tasks/yuminstallroot.yml
+++ b/test/integration/targets/yum/tasks/yuminstallroot.yml
@@ -76,13 +76,6 @@
- ansible_facts["distribution_major_version"] == "7"
- ansible_facts["distribution"] == "RedHat"
block:
- # Need to enable this RHUI repo for RHEL7 testing in AWS, CentOS has Extras
- # enabled by default and this is not needed there.
- - name: enable rhel-7-server-rhui-extras-rpms repo for RHEL7
- command: yum-config-manager --enable rhel-7-server-rhui-extras-rpms
- - name: update cache to pull repodata
- yum:
- update_cache: yes
- name: install required packages for buildah test
yum:
state: present
@@ -137,5 +130,3 @@
state: absent
name:
- buildah
- - name: disable rhel-7-server-rhui-extras-rpms repo for RHEL7
- command: yum-config-manager --disable rhel-7-server-rhui-extras-rpms